| code (string, 87-55.2k chars) | code_codestyle (int64, 0-349) | style_context (string, 135-49.1k chars) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
__snake_case :List[str] = '''src/transformers'''
__snake_case :Tuple = '''docs/source/en'''
__snake_case :Dict = '''.'''
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
with open(_UpperCAmelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__a = f.readlines()
# Find the start prompt.
__a = 0
while not lines[start_index].startswith(_UpperCAmelCase ):
start_index += 1
start_index += 1
__a = start_index
while not lines[end_index].startswith(_UpperCAmelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
__snake_case :Union[str, Any] = '''Model|Encoder|Decoder|ForConditionalGeneration'''
# Regexes that match TF/Flax/PT model names.
__snake_case :Union[str, Any] = re.compile(r'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
__snake_case :int = re.compile(r'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will also match any TF or Flax model, so it needs to be in an else branch after the two previous regexes.
__snake_case :Tuple = re.compile(r'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# This is to make sure the transformers module imported is the one in the repo.
__snake_case :str = direct_transformers_import(TRANSFORMERS_PATH)
def __snake_case ( _UpperCAmelCase ):
__a = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , _UpperCAmelCase )
return [m.group(0 ) for m in matches]
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = 2 if text == '''✅''' or text == '''❌''' else len(_UpperCAmelCase )
__a = (width - text_length) // 2
__a = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def __snake_case ( ):
__a = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__a = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
__a = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
__a = collections.defaultdict(_UpperCAmelCase )
__a = collections.defaultdict(_UpperCAmelCase )
__a = collections.defaultdict(_UpperCAmelCase )
__a = collections.defaultdict(_UpperCAmelCase )
__a = collections.defaultdict(_UpperCAmelCase )
# Let's lookup through all transformers object (once).
for attr_name in dir(_UpperCAmelCase ):
__a = None
if attr_name.endswith('''Tokenizer''' ):
__a = slow_tokenizers
__a = attr_name[:-9]
elif attr_name.endswith('''TokenizerFast''' ):
__a = fast_tokenizers
__a = attr_name[:-13]
elif _re_tf_models.match(_UpperCAmelCase ) is not None:
__a = tf_models
__a = _re_tf_models.match(_UpperCAmelCase ).groups()[0]
elif _re_flax_models.match(_UpperCAmelCase ) is not None:
__a = flax_models
__a = _re_flax_models.match(_UpperCAmelCase ).groups()[0]
elif _re_pt_models.match(_UpperCAmelCase ) is not None:
__a = pt_models
__a = _re_pt_models.match(_UpperCAmelCase ).groups()[0]
if lookup_dict is not None:
while len(_UpperCAmelCase ) > 0:
if attr_name in model_name_to_prefix.values():
__a = True
break
# Try again after removing the last word in the name
__a = ''''''.join(camel_case_split(_UpperCAmelCase )[:-1] )
# Let's build that table!
__a = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
__a = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support''']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
__a = [len(_UpperCAmelCase ) + 2 for c in columns]
__a = max([len(_UpperCAmelCase ) for name in model_names] ) + 2
# Build the table per se
__a = '''|''' + '''|'''.join([_center_text(_UpperCAmelCase , _UpperCAmelCase ) for c, w in zip(_UpperCAmelCase , _UpperCAmelCase )] ) + '''|\n'''
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n"
__a = {True: '''✅''', False: '''❌'''}
for name in model_names:
__a = model_name_to_prefix[name]
__a = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(_UpperCAmelCase , _UpperCAmelCase ) for l, w in zip(_UpperCAmelCase , _UpperCAmelCase )] ) + "|\n"
return table
def __snake_case ( _UpperCAmelCase=False ):
__a , __a , __a , __a = _find_text_in_file(
filename=os.path.join(_UpperCAmelCase , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , )
__a = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(_UpperCAmelCase , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' )
if __name__ == "__main__":
__snake_case :str = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__snake_case :List[str] = parser.parse_args()
check_model_table(args.fix_and_overwrite)
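# A minimal, self-contained sketch of the camel-case splitter this script
# relies on; the helper name and the assert below are illustrative additions,
# not part of the original file.
import re

_re_camel = re.compile(r'.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')

def split_camel_case(identifier):
    # Each lazy match ends just before a lower->upper or acronym boundary.
    return [m.group(0) for m in _re_camel.finditer(identifier)]

assert split_camel_case('TFBertForConditionalGeneration') == ['TF', 'Bert', 'For', 'Conditional', 'Generation']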
| 49
|
from __future__ import annotations
import random
# Maximum size of the population. A bigger value may be faster but uses more memory.
__snake_case :Optional[int] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__snake_case :List[str] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This helps ensure that all genes are eventually explored during evolution.
__snake_case :List[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = len([g for position, g in enumerate(_UpperCAmelCase ) if g == main_target[position]] )
return (item, float(_UpperCAmelCase ))
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = random.randint(0 , len(_UpperCAmelCase ) - 1 )
__a = parent_a[:random_slice] + parent_a[random_slice:]
__a = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = list(_UpperCAmelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
__a = random.choice(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
__a = []
# Generate more children proportionally to the fitness score.
__a = int(parent_a[1] * 100 ) + 1
__a = 10 if child_n >= 10 else child_n
for _ in range(_UpperCAmelCase ):
__a = population_score[random.randint(0 , _UpperCAmelCase )][0]
__a , __a = crossover(parent_a[0] , _UpperCAmelCase )
# Append new string to the population list.
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
return pop
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True ):
# Verify that N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
__a = f'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(_UpperCAmelCase )
# Verify that the target contains no genes besides the ones inside genes variable.
__a = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
__a = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(_UpperCAmelCase )
# Generate random starting population.
__a = []
for _ in range(_UpperCAmelCase ):
population.append(''''''.join([random.choice(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) )] ) )
# Just some logs to know what the algorithm is doing.
__a , __a = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_UpperCAmelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
__a = [evaluate(_UpperCAmelCase , _UpperCAmelCase ) for item in population]
# Check if there is a matching evolution.
__a = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] , reverse=_UpperCAmelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generations
# just to show that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'\nGeneration: {generation}'
f'\nTotal Population:{total_population}'
f'\nBest score: {population_score[0][1]}'
f'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best individuals.
# Keeping these avoids regression of the evolution.
__a = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_UpperCAmelCase )
# Normalize population score to be between 0 and 1.
__a = [
(item, score / len(_UpperCAmelCase )) for item, score in population_score
]
# This is selection
for i in range(_UpperCAmelCase ):
population.extend(select(population_score[int(_UpperCAmelCase )] , _UpperCAmelCase , _UpperCAmelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also solve small strings in
# far fewer generations.
if len(_UpperCAmelCase ) > N_POPULATION:
break
if __name__ == "__main__":
__snake_case :Optional[int] = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__snake_case :List[Any] = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__snake_case ,__snake_case ,__snake_case :Dict = basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
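# Note: after the renaming above, both child assignments in the crossover
# function slice the same parent, so the operation returns the first parent
# unchanged, twice. A corrected single-point crossover, as a reference sketch
# (the names below are illustrative, not the file's own):
def crossover_sketch(parent_1, parent_2):
    # Pick a cut point, then swap the tails of the two parents.
    cut = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:cut] + parent_2[cut:]
    child_2 = parent_2[:cut] + parent_1[cut:]
    return (child_1, child_2)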
| 49
| 1
|
def __snake_case ( _UpperCAmelCase ):
__a = len(_UpperCAmelCase )
for i in range(length - 1 ):
__a = i
for k in range(i + 1 , _UpperCAmelCase ):
if collection[k] < collection[least]:
__a = k
if least != i:
__a , __a = (collection[i], collection[least])
return collection
if __name__ == "__main__":
__snake_case :int = input('''Enter numbers separated by a comma:\n''').strip()
__snake_case :Optional[int] = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
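# A quick sanity check of the sort above (an illustrative addition, not part
# of the original file):
assert selection_sort([5, 2, 9, 1, 5, -3]) == [-3, 1, 2, 5, 5, 9]
assert selection_sort([]) == []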
| 49
|
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# Initialise PyTorch model
__a = LxmertConfig.from_json_file(_UpperCAmelCase )
print(f'Building PyTorch model from configuration: {config}' )
__a = LxmertForPreTraining(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save the PyTorch model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , _UpperCAmelCase )
if __name__ == "__main__":
__snake_case :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__snake_case :Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
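# A hypothetical direct call of the conversion function; the three paths
# below are placeholders, not real files:
# convert_tf_checkpoint_to_pytorch(
#     'checkpoints/lxmert/model.ckpt',      # TensorFlow checkpoint
#     'checkpoints/lxmert/config.json',     # model configuration
#     'lxmert_pytorch/pytorch_model.bin',   # PyTorch output path
# )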
| 49
| 1
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class _A ( __UpperCAmelCase ):
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = tempfile.mkdtemp()
__a = 8
# DPR tok
__a = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__a = os.path.join(self.tmpdirname , '''dpr_tokenizer''')
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE)
__a = os.path.join(__SCREAMING_SNAKE_CASE , DPR_VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
# BART tok
__a = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__a = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE))))
__a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__a = {'''unk_token''': '''<unk>'''}
__a = os.path.join(self.tmpdirname , '''bart_tokenizer''')
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE)
__a = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''vocab_file'''])
__a = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE) + '''\n''')
with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer'''))
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer'''))
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
@require_tokenizers
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = os.path.join(self.tmpdirname , '''rag_tokenizer''')
__a = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict())
__a = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer())
rag_config.save_pretrained(__SCREAMING_SNAKE_CASE)
rag_tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE)
__a = RagTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE)
self.assertIsInstance(new_rag_tokenizer.question_encoder , __SCREAMING_SNAKE_CASE)
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab())
self.assertIsInstance(new_rag_tokenizer.generator , __SCREAMING_SNAKE_CASE)
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab())
@slow
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = RagTokenizer.from_pretrained('''facebook/rag-token-nq''')
__a = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
__a = tokenizer(__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
@slow
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = RagTokenizer.from_pretrained('''facebook/rag-sequence-nq''')
__a = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
__a = tokenizer(__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
| 49
|
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def __snake_case ( _UpperCAmelCase = "isbn/0140328726" ):
__a = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('''/''' ) != 1:
__a = f'{olid} is not a valid Open Library olid'
raise ValueError(_UpperCAmelCase )
return requests.get(f'https://openlibrary.org/{new_olid}.json' ).json()
def __snake_case ( _UpperCAmelCase ):
__a = {
'''title''': '''Title''',
'''publish_date''': '''Publish date''',
'''authors''': '''Authors''',
'''number_of_pages''': '''Number of pages:''',
'''first_sentence''': '''First sentence''',
'''isbn_10''': '''ISBN (10)''',
'''isbn_13''': '''ISBN (13)''',
}
__a = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
__a = [
get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
]
__a = data['''First sentence''']['''value''']
for key, value in data.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a = ''', '''.join(_UpperCAmelCase )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
__snake_case :List[Any] = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(f'\nSearching Open Library for ISBN: {isbn}...\n')
try:
__snake_case :Optional[Any] = summarize_book(get_openlibrary_data(f'isbn/{isbn}'))
print('''\n'''.join(f'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'Sorry, there are no results for ISBN: {isbn}.')
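# A non-interactive usage sketch of the two helpers above; it reuses the
# function's own default ISBN and assumes network access:
# book_summary = summarize_book(get_openlibrary_data('isbn/0140328726'))
# for key, value in book_summary.items():
#     print(f'{key}: {value}')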
| 49
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case :Optional[Any] = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :str = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__snake_case :Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
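# The lazy-import pattern above in miniature: submodule contents are listed
# in a dict, type checkers see real imports, and at runtime the module is
# swapped for a proxy that imports on first attribute access. A stripped-down
# sketch (the `configuration_foo` submodule and `FooConfig` name are
# hypothetical):
#
# from typing import TYPE_CHECKING
# from transformers.utils import _LazyModule
#
# _import_structure = {'configuration_foo': ['FooConfig']}
#
# if TYPE_CHECKING:
#     from .configuration_foo import FooConfig
# else:
#     import sys
#
#     sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)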
| 49
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _A ( __UpperCAmelCase ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : NestedDataStructureLike[PathLike] , __SCREAMING_SNAKE_CASE : Optional[NamedSplit] = None , __SCREAMING_SNAKE_CASE : Optional[Features] = None , __SCREAMING_SNAKE_CASE : str = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[int] = None , **__SCREAMING_SNAKE_CASE : List[str] , ):
'''simple docstring'''
super().__init__(
__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , streaming=__SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__a = path_or_paths if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else {self.split: path_or_paths}
__a = Text(
cache_dir=__SCREAMING_SNAKE_CASE , data_files=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
if self.streaming:
__a = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
__a = None
__a = None
__a = None
__a = None
self.builder.download_and_prepare(
download_config=__SCREAMING_SNAKE_CASE , download_mode=__SCREAMING_SNAKE_CASE , verification_mode=__SCREAMING_SNAKE_CASE , base_path=__SCREAMING_SNAKE_CASE , num_proc=self.num_proc , )
__a = self.builder.as_dataset(
split=self.split , verification_mode=__SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory)
return dataset
| 49
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[Any] = '''dandelin/vilt-b32-finetuned-vqa'''
UpperCamelCase__ : Optional[int] = (
'''This is a tool that answers a question about an image. It takes an input named `image` which should be the '''
'''image containing the information, as well as a `question` which should be the question in English. It '''
'''returns a text that is the answer to the question.'''
)
UpperCamelCase__ : Optional[Any] = '''image_qa'''
UpperCamelCase__ : Optional[Any] = AutoProcessor
UpperCamelCase__ : Union[str, Any] = AutoModelForVisualQuestionAnswering
UpperCamelCase__ : Optional[int] = ['''image''', '''text''']
UpperCamelCase__ : Optional[Any] = ['''text''']
def __init__( self : int , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
requires_backends(self , ['''vision'''])
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : "Image" , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
return self.pre_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
with torch.no_grad():
return self.model(**__SCREAMING_SNAKE_CASE).logits
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
__a = outputs.argmax(-1).item()
return self.model.config.id2label[idx]
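# A hedged usage sketch of this tool; in the transformers source the class is
# named ImageQuestionAnsweringTool (obfuscated to `_A` above), and the image
# path and question below are placeholders:
# from PIL import Image
# tool = _A()
# answer = tool(image=Image.open('photo.png'), question='What color is the car?')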
| 49
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__snake_case :List[str] = '''\
Text data.
Second line of data.'''
__snake_case :Optional[Any] = '''file'''
@pytest.fixture(scope='''session''' )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
__a = bytes(_UpperCAmelCase , '''utf-8''' )
with zstd.open(_UpperCAmelCase , '''wb''' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture
def __snake_case ( _UpperCAmelCase ):
with open(os.path.join(tmpfs.local_root_dir , _UpperCAmelCase ) , '''w''' ) as f:
f.write(_UpperCAmelCase )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
__a = input_paths[compression_format]
__a = tmp_path / '''cache'''
__a = DownloadConfig(cache_dir=_UpperCAmelCase , extract_compressed_file=_UpperCAmelCase )
__a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase )
with open(_UpperCAmelCase ) as f:
__a = f.read()
with open(_UpperCAmelCase ) as f:
__a = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = '''custom_cache'''
__a = '''custom_extracted_dir'''
__a = tmp_path / '''custom_extracted_path'''
if default_extracted:
__a = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _UpperCAmelCase )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_UpperCAmelCase ) )
__a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
__a = xz_file
__a = (
DownloadConfig(extract_compressed_file=_UpperCAmelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_UpperCAmelCase )
)
__a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase )
assert Path(_UpperCAmelCase ).parent.parts[-2:] == expected
def __snake_case ( _UpperCAmelCase ):
# absolute path
__a = str(Path(_UpperCAmelCase ).resolve() )
assert cached_path(_UpperCAmelCase ) == text_file
# relative path
__a = str(Path(_UpperCAmelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_UpperCAmelCase ) == text_file
def __snake_case ( _UpperCAmelCase ):
# absolute path
__a = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(_UpperCAmelCase ):
cached_path(_UpperCAmelCase )
# relative path
__a = '''./__missing_file__.txt'''
with pytest.raises(_UpperCAmelCase ):
cached_path(_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = get_from_cache(f'tmp://{tmpfs_file}' )
with open(_UpperCAmelCase ) as f:
__a = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( ):
with pytest.raises(_UpperCAmelCase ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
http_get('''https://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
ftp_get('''ftp://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
fsspec_get('''s3://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
fsspec_head('''s3://huggingface.co''' )
| 49
| 1
|
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _A ( __UpperCAmelCase ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int]=13 , __SCREAMING_SNAKE_CASE : Optional[Any]=7 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : str=99 , __SCREAMING_SNAKE_CASE : Any=32 , __SCREAMING_SNAKE_CASE : List[str]=5 , __SCREAMING_SNAKE_CASE : Tuple=4 , __SCREAMING_SNAKE_CASE : Tuple=37 , __SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : str=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=512 , __SCREAMING_SNAKE_CASE : Dict=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : int=3 , __SCREAMING_SNAKE_CASE : List[str]=4 , __SCREAMING_SNAKE_CASE : Any=None , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = scope
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length])
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__a = ids_tensor([self.batch_size] , self.num_choices)
__a = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
__a = DistilBertModel(config=__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a = DistilBertForMaskedLM(config=__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a = DistilBertForQuestionAnswering(config=__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict):
'''simple docstring'''
__a = self.num_labels
__a = DistilBertForSequenceClassification(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
__a = self.num_labels
__a = DistilBertForTokenClassification(config=__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
__a = self.num_choices
__a = DistilBertForMultipleChoice(config=__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__a = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__a = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
((__a) , (__a) , (__a) , (__a) , (__a) , (__a)) = config_and_inputs
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _A ( __UpperCAmelCase ,__UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : Optional[int] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
UpperCamelCase__ : List[str] = (
{
'''feature-extraction''': DistilBertModel,
'''fill-mask''': DistilBertForMaskedLM,
'''question-answering''': DistilBertForQuestionAnswering,
'''text-classification''': DistilBertForSequenceClassification,
'''token-classification''': DistilBertForTokenClassification,
'''zero-shot''': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ : str = True
UpperCamelCase__ : List[Any] = True
UpperCamelCase__ : Dict = True
UpperCamelCase__ : Union[str, Any] = True
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = DistilBertModelTester(self)
__a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , dim=37)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__SCREAMING_SNAKE_CASE)
@slow
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = DistilBertModel.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
@slow
@require_torch_gpu
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
__a = True
__a = model_class(config=__SCREAMING_SNAKE_CASE)
__a = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = torch.jit.trace(
__SCREAMING_SNAKE_CASE , (inputs_dict['''input_ids'''].to('''cpu'''), inputs_dict['''attention_mask'''].to('''cpu''')))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__SCREAMING_SNAKE_CASE , os.path.join(__SCREAMING_SNAKE_CASE , '''traced_model.pt'''))
__a = torch.jit.load(os.path.join(__SCREAMING_SNAKE_CASE , '''traced_model.pt''') , map_location=__SCREAMING_SNAKE_CASE)
loaded(inputs_dict['''input_ids'''].to(__SCREAMING_SNAKE_CASE) , inputs_dict['''attention_mask'''].to(__SCREAMING_SNAKE_CASE))
@require_torch
class _A ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = DistilBertModel.from_pretrained('''distilbert-base-uncased''')
__a = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
__a = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
__a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE)[0]
__a = torch.Size((1, 11, 768))
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE)
__a = torch.tensor(
[[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1E-4))
| 49
|
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Tuple = (DDPMParallelScheduler,)
def _lowerCamelCase ( self : int , **__SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**__SCREAMING_SNAKE_CASE)
return config
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str):
'''simple docstring'''
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_09_79)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = len(__SCREAMING_SNAKE_CASE)
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = self.dummy_sample_deter + 0.1
__a = self.dummy_sample_deter - 0.1
__a = samplea.shape[0]
__a = torch.stack([samplea, samplea, samplea] , dim=0)
__a = torch.arange(__SCREAMING_SNAKE_CASE)[0:3, None].repeat(1 , __SCREAMING_SNAKE_CASE)
__a = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
__a = scheduler.batch_step_no_noise(__SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1) , samples.flatten(0 , 1))
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 11_53.18_33) < 1E-2
assert abs(result_mean.item() - 0.50_05) < 1E-3
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = len(__SCREAMING_SNAKE_CASE)
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0)
for t in reversed(range(__SCREAMING_SNAKE_CASE)):
# 1. predict noise residual
__a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 2_58.96_06) < 1E-2
assert abs(result_mean.item() - 0.33_72) < 1E-3
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(prediction_type='''v_prediction''')
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = len(__SCREAMING_SNAKE_CASE)
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0)
for t in reversed(range(__SCREAMING_SNAKE_CASE)):
# 1. predict noise residual
__a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 2_02.02_96) < 1E-2
assert abs(result_mean.item() - 0.26_31) < 1E-3
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
__a = scheduler.timesteps
for i, timestep in enumerate(__SCREAMING_SNAKE_CASE):
if i == len(__SCREAMING_SNAKE_CASE) - 1:
__a = -1
else:
__a = timesteps[i + 1]
__a = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE)
__a = prev_t.item()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [100, 87, 50, 51, 0]
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''`custom_timesteps` must be in descending order.'''):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [100, 87, 50, 1, 0]
__a = len(__SCREAMING_SNAKE_CASE)
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''):
scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [scheduler.config.num_train_timesteps]
with self.assertRaises(
__SCREAMING_SNAKE_CASE , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
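# In miniature, what the custom-timesteps tests above exercise (a sketch
# using the scheduler imported at the top of this file):
# scheduler = DDPMParallelScheduler(num_train_timesteps=1_000)
# scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])   # descending: accepted
# scheduler.set_timesteps(timesteps=[100, 87, 50, 51, 0])  # not descending: raises ValueError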
| 49
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case :List[Any] = logging.get_logger(__name__)
__snake_case :Dict = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Tuple = '''mobilenet_v2'''
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple=3 , __SCREAMING_SNAKE_CASE : int=224 , __SCREAMING_SNAKE_CASE : Optional[int]=1.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=8 , __SCREAMING_SNAKE_CASE : List[Any]=8 , __SCREAMING_SNAKE_CASE : int=6 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Dict="relu6" , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Dict=0.8 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : Dict=0.0_01 , __SCREAMING_SNAKE_CASE : Dict=255 , **__SCREAMING_SNAKE_CASE : Optional[int] , ):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE)
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''')
__a = num_channels
__a = image_size
__a = depth_multiplier
__a = depth_divisible_by
__a = min_depth
__a = expand_ratio
__a = output_stride
__a = first_layer_is_expansion
__a = finegrained_output
__a = hidden_act
__a = tf_padding
__a = classifier_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = semantic_loss_ignore_index
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Any = version.parse('''1.11''' )
@property
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
return OrderedDict([('''pixel_values''', {0: '''batch'''})])
@property
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})])
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})])
@property
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
return 1E-4
| 49
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__snake_case :List[Any] = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _A ( __UpperCAmelCase ):
def __init__( self : Dict , **__SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE)
requires_backends(self , '''vision''')
requires_backends(self , '''torch''')
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.')
self.check_model_type(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any , **__SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
__a = {}
__a = {}
__a = {}
# preprocess args
if "points_per_batch" in kwargs:
__a = kwargs['''points_per_batch''']
if "points_per_crop" in kwargs:
__a = kwargs['''points_per_crop''']
if "crops_n_layers" in kwargs:
__a = kwargs['''crops_n_layers''']
if "crop_overlap_ratio" in kwargs:
__a = kwargs['''crop_overlap_ratio''']
if "crop_n_points_downscale_factor" in kwargs:
__a = kwargs['''crop_n_points_downscale_factor''']
# postprocess args
if "pred_iou_thresh" in kwargs:
__a = kwargs['''pred_iou_thresh''']
if "stability_score_offset" in kwargs:
__a = kwargs['''stability_score_offset''']
if "mask_threshold" in kwargs:
__a = kwargs['''mask_threshold''']
if "stability_score_thresh" in kwargs:
__a = kwargs['''stability_score_thresh''']
if "crops_nms_thresh" in kwargs:
__a = kwargs['''crops_nms_thresh''']
if "output_rle_mask" in kwargs:
__a = kwargs['''output_rle_mask''']
if "output_bboxes_mask" in kwargs:
__a = kwargs['''output_bboxes_mask''']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , *__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Any=None , **__SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
return super().__call__(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , num_workers=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any=64 , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : float = 512 / 1_500 , __SCREAMING_SNAKE_CASE : Optional[int] = 32 , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , ):
'''simple docstring'''
__a = load_image(__SCREAMING_SNAKE_CASE)
__a = self.image_processor.size['''longest_edge''']
__a , __a , __a , __a = self.image_processor.generate_crop_boxes(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''')
with self.device_placement():
if self.framework == "pt":
__a = self.get_inference_context()
with inference_context():
__a = self._ensure_tensor_on_device(__SCREAMING_SNAKE_CASE , device=self.device)
__a = self.model.get_image_embeddings(model_inputs.pop('''pixel_values'''))
__a = image_embeddings
__a = grid_points.shape[1]
__a = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
'''Cannot have points_per_batch<=0. Must be >=1 to return batched outputs. '''
'''To return all points at once, set points_per_batch to None''')
for i in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = grid_points[:, i : i + points_per_batch, :, :]
__a = input_labels[:, i : i + points_per_batch]
__a = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int=0.88 , __SCREAMING_SNAKE_CASE : List[Any]=0.95 , __SCREAMING_SNAKE_CASE : List[Any]=0 , __SCREAMING_SNAKE_CASE : int=1 , ):
'''simple docstring'''
__a = model_inputs.pop('''input_boxes''')
__a = model_inputs.pop('''is_last''')
__a = model_inputs.pop('''original_sizes''').tolist()
__a = model_inputs.pop('''reshaped_input_sizes''').tolist()
__a = self.model(**__SCREAMING_SNAKE_CASE)
# Post-processing happens here in order to avoid CPU/GPU copies of ALL the masks
__a = model_outputs['''pred_masks''']
__a = self.image_processor.post_process_masks(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , binarize=__SCREAMING_SNAKE_CASE)
__a = model_outputs['''iou_scores''']
__a , __a , __a = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
    def _lowerCamelCase ( self : int , model_outputs : Dict , output_rle_mask : Dict=False , output_bboxes_mask : Tuple=False , crops_nms_thresh : int=0.7 , ):
        '''simple docstring'''
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop('''iou_scores'''))
            all_masks.extend(model_output.pop('''masks'''))
            all_boxes.append(model_output.pop('''boxes'''))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks , iou_scores , rle_mask , bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks , all_scores , all_boxes , crops_nms_thresh)
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional['''rle_mask'''] = rle_mask
        if output_bboxes_mask:
            optional['''bounding_boxes'''] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 49
| 1
|
__snake_case :Union[str, Any] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__snake_case :Union[str, Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__snake_case :Optional[int] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 49
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__snake_case :str = logging.get_logger(__name__)
__snake_case :int = {'''vocab_file''': '''vocab.txt'''}
__snake_case :List[Any] = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
__snake_case :List[str] = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
__snake_case :Optional[int] = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[int] = VOCAB_FILES_NAMES
UpperCamelCase__ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : int = ConvBertTokenizer
    def __init__( self : Optional[Any] , vocab_file : Optional[Any]=None , tokenizer_file : Dict=None , do_lower_case : Tuple=True , unk_token : Optional[int]="[UNK]" , sep_token : int="[SEP]" , pad_token : List[Any]="[PAD]" , cls_token : int="[CLS]" , mask_token : Optional[int]="[MASK]" , tokenize_chinese_chars : Optional[int]=True , strip_accents : Union[str, Any]=None , **kwargs : Dict , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''' , do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type'''))
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def _lowerCamelCase ( self : int , token_ids_a : Dict , token_ids_b : Any=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def _lowerCamelCase ( self : str , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
    def _lowerCamelCase ( self : Optional[Any] , save_directory : str , filename_prefix : Optional[str] = None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
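# A short usage sketch for the fast tokenizer above (defined here as `_A`; in
# transformers it is ConvBertTokenizerFast). The checkpoint name comes from the
# pretrained vocab map at the top of this file:
#
# tokenizer = _A.from_pretrained("YituTech/conv-bert-base")
# encoded = tokenizer("Hello world", "Second segment")
# # input_ids begin with [CLS] and each segment ends with [SEP];
# # token_type_ids are 0 for the first segment and 1 for the second.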
| 49
| 1
|
import qiskit
def __snake_case ( qubits , classical_bits ):
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(f'Total count for various states are: {single_qubit_measure(1, 1)}')
| 49
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case :Any = logging.get_logger(__name__)
__snake_case :Optional[Any] = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
__snake_case :List[Any] = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def __snake_case ( model_name ):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]['''hidden_dim''']
    config.width_coefficient = CONFIG_MAP[model_name]['''width_coef''']
    config.depth_coefficient = CONFIG_MAP[model_name]['''depth_coef''']
    config.image_size = CONFIG_MAP[model_name]['''image_size''']
    config.dropout_rate = CONFIG_MAP[model_name]['''dropout_rate''']
    config.depthwise_padding = CONFIG_MAP[model_name]['''dw_padding''']
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    config.num_labels = 1000
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def __snake_case ( ):
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
return im
def __snake_case ( model_name ):
    size = CONFIG_MAP[model_name]['''image_size''']
    preprocessor = EfficientNetImageProcessor(
        size={'''height''': size, '''width''': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=False , )
    return preprocessor
def __snake_case ( original_param_names ):
    block_names = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
__a = block_name_mapping[b]
rename_keys.append((f'block{b}_expand_conv/kernel:0', f'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
rename_keys.append((f'block{b}_expand_bn/gamma:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
rename_keys.append((f'block{b}_expand_bn/beta:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
rename_keys.append(
(f'block{b}_expand_bn/moving_mean:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
rename_keys.append(
(f'block{b}_expand_bn/moving_variance:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
rename_keys.append(
(f'block{b}_dwconv/depthwise_kernel:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
rename_keys.append((f'block{b}_bn/gamma:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
rename_keys.append((f'block{b}_bn/beta:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
rename_keys.append(
(f'block{b}_bn/moving_mean:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
rename_keys.append(
(f'block{b}_bn/moving_variance:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
rename_keys.append((f'block{b}_se_reduce/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
rename_keys.append((f'block{b}_se_reduce/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
rename_keys.append((f'block{b}_se_expand/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
rename_keys.append((f'block{b}_se_expand/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
rename_keys.append(
(f'block{b}_project_conv/kernel:0', f'encoder.blocks.{hf_b}.projection.project_conv.weight') )
rename_keys.append((f'block{b}_project_bn/gamma:0', f'encoder.blocks.{hf_b}.projection.project_bn.weight') )
rename_keys.append((f'block{b}_project_bn/beta:0', f'encoder.blocks.{hf_b}.projection.project_bn.bias') )
rename_keys.append(
(f'block{b}_project_bn/moving_mean:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
rename_keys.append(
(f'block{b}_project_bn/moving_variance:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = '''efficientnet.''' + item[1]
    key_mapping['''predictions/kernel:0'''] = '''classifier.weight'''
    key_mapping['''predictions/bias:0'''] = '''classifier.bias'''
    return key_mapping
def __snake_case ( hf_params , tf_params , key_mapping ):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def __snake_case ( model_name , pytorch_dump_folder_path , save_model , push_to_hub ):
    original_model = model_classes[model_name](
        include_top=True , weights='''imagenet''' , input_tensor=None , input_shape=None , pooling=None , classes=1000 , classifier_activation='''softmax''' , )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )
    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print('''Converting parameters...''' )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params , tf_params , key_mapping )
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img() , return_tensors='''pt''' )
# HF model inference
hf_model.eval()
with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]['''image_size''']
    img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x , axis=0 )
    original_logits = original_model.predict(x )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits , hf_logits , atol=1E-3 ), "The predicted logits are not the same."
print('''Model outputs match!''' )
if save_model:
# Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
# Push model and image processor to hub
print(f'Pushing converted {model_name} to the hub...' )
        model_name = f'efficientnet-{model_name}'
        preprocessor.push_to_hub(model_name )
        hf_model.push_to_hub(model_name )
if __name__ == "__main__":
__snake_case :int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
__snake_case :Optional[int] = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
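# Example invocation (the script filename and paths are hypothetical):
#
#   python convert_efficientnet_to_pytorch.py \
#       --model_name b0 \
#       --pytorch_dump_folder_path hf_model \
#       --save_model
#
# This loads the Keras EfficientNetB0 weights, copies them into the HF model,
# asserts that both models produce matching logits, and saves the result locally.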
| 49
| 1
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
__snake_case :List[str] = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
__snake_case :List[Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__snake_case :Dict = dict(zip(vocab, range(len(vocab))))
__snake_case :Optional[int] = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case :Tuple = Path(tmpdirname)
__snake_case :Tuple = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
__snake_case :List[str] = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
__snake_case :Tuple = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
__snake_case :Tuple = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
__snake_case :int = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
__snake_case :Optional[Any] = FSMTForConditionalGeneration(config)
print(f'num of params {tiny_model.num_parameters()}')
# Test
__snake_case :List[Any] = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
__snake_case :Any = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 49
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__snake_case :Optional[Any] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__snake_case :Any = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f'{len(upper_files)} files contain uppercase characters:')
print('''\n'''.join(upper_files) + '''\n''')
__snake_case :Tuple = [file for file in filepaths if ''' ''' in file]
if space_files:
print(f'{len(space_files)} files contain space characters:')
print('''\n'''.join(space_files) + '''\n''')
__snake_case :Optional[int] = [file for file in filepaths if '''-''' in file]
if hyphen_files:
print(f'{len(hyphen_files)} files contain hyphen characters:')
print('''\n'''.join(hyphen_files) + '''\n''')
__snake_case :Optional[int] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f'{len(nodir_files)} files are not in a directory:')
print('''\n'''.join(nodir_files) + '''\n''')
__snake_case :int = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 49
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__snake_case :int = logging.get_logger(__name__)
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[Any] = ['''pixel_values''']
    def __init__( self : List[Any] , do_resize : bool = True , size : Dict[str, int] = None , crop_pct : float = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs : Optional[Any] , ):
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {'''shortest_edge''': 384}
        size = get_size_dict(size , default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def _lowerCamelCase ( self : Optional[Any] , image : np.ndarray , size : Dict[str, int] , crop_pct : float , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : str , ):
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}')
        shortest_edge = size['''shortest_edge''']
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image , size=resize_shortest_edge , default_to_square=False)
            image = resize(image=image , size=resize_size , resample=resample , data_format=data_format , **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image , size=(shortest_edge, shortest_edge) , data_format=data_format , **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image , size=(shortest_edge, shortest_edge) , resample=resample , data_format=data_format , **kwargs)
    def _lowerCamelCase ( self : List[str] , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : List[Any] , ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs)
    def _lowerCamelCase ( self : List[str] , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : List[str] , ):
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs)
    def _lowerCamelCase ( self : Any , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , crop_pct : float = None , resample : PILImageResampling = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs : Union[str, Any] , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''')
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError('''crop_pct must be specified if size < 384.''')
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , crop_pct=crop_pct , resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std) for image in images]
        images = [to_channel_dimension_format(image , data_format) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors)
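# A minimal usage sketch for the image processor above (defined here as `_A`).
# `my_image` is a placeholder for a PIL image or numpy array:
#
# processor = _A(size={"shortest_edge": 384})
# batch = processor(images=my_image, return_tensors="pt")
# # batch["pixel_values"] has shape (num_images, num_channels, height, width);
# # with shortest_edge >= 384 the image is warped (resized without center-cropping).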
| 49
|
from collections import defaultdict
def __snake_case ( first_str , second_str ):
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(''' ''' , '''''' )
    second_str = second_str.replace(''' ''' , '''''' )
    # Strings of different lengths are not anagrams
    if len(first_str ) != len(second_str ):
        return False
    # Default values for count should be 0
    count = defaultdict(int )
# For each character in input strings,
# increment count in the corresponding
for i in range(len(_UpperCAmelCase ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
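# An equivalent check using collections.Counter, shown as an alternative design:
# two strings are anagrams iff their character multisets are equal. Both versions
# run in O(n) time, but Counter avoids the manual increment/decrement bookkeeping.
#
# from collections import Counter
#
# def check_anagrams_counter(first_str: str, second_str: str) -> bool:
#     first_str = first_str.lower().replace(" ", "")
#     second_str = second_str.lower().replace(" ", "")
#     return Counter(first_str) == Counter(second_str)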
if __name__ == "__main__":
from doctest import testmod
testmod()
__snake_case :Any = input('''Enter the first string ''').strip()
__snake_case :int = input('''Enter the second string ''').strip()
__snake_case :int = check_anagrams(input_a, input_b)
print(f'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
| 49
| 1
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__snake_case :Optional[int] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__snake_case :List[str] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__snake_case :List[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __snake_case ( item , main_target ):
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
def __snake_case ( parent_a , parent_b ):
    random_slice = random.randint(0 , len(parent_a ) - 1 )
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
def __snake_case ( child , genes ):
    child_list = list(child )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0 , len(child ) - 1 )] = random.choice(genes )
    return "".join(child_list )
def __snake_case ( parent_a , population_score , genes , ):
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 100 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_b = population_score[random.randint(0 , N_SELECTED )][0]
        child_a , child_b = crossover(parent_a[0] , parent_b )
        # Append new string to the population list.
        pop.append(mutate(child_a , genes ) )
        pop.append(mutate(child_b , genes ) )
    return pop
def __snake_case ( target , genes , debug = True ):
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(msg )
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        msg = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(msg )
# Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append(''''''.join([random.choice(genes ) for i in range(len(target ) )] ) )
# Just some logs to know what the algorithms is doing.
    generation , total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item , target ) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score , key=lambda x: x[1] , reverse=True )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'\nGeneration: {generation}'
f'\nTotal Population:{total_population}'
f'\nBest score: {population_score[0][1]}'
f'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] , population_score , genes ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
            if len(population ) > N_POPULATION:
break
if __name__ == "__main__":
__snake_case :Optional[int] = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__snake_case :List[Any] = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__snake_case ,__snake_case ,__snake_case :Dict = basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
| 49
|
import logging
from transformers.configuration_utils import PretrainedConfig
__snake_case :Any = logging.getLogger(__name__)
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[Any] = '''masked_bert'''
def __init__( self : str , __SCREAMING_SNAKE_CASE : int=30_522 , __SCREAMING_SNAKE_CASE : str=768 , __SCREAMING_SNAKE_CASE : List[str]=12 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3_072 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=1E-12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0 , __SCREAMING_SNAKE_CASE : List[str]="topK" , __SCREAMING_SNAKE_CASE : List[Any]="constant" , __SCREAMING_SNAKE_CASE : int=0.0 , **__SCREAMING_SNAKE_CASE : List[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = pruning_method
__a = mask_init
__a = mask_scale
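# A minimal instantiation sketch for the config above (defined here as `_A`; it
# mirrors the MaskedBertConfig from the movement-pruning examples). Values shown
# are the defaults:
#
# config = _A(vocab_size=30_522, hidden_size=768, pruning_method="topK")
# # pruning_method selects how importance scores are binarized into weight masks;
# # mask_init and mask_scale control how those scores are initialized.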
| 49
| 1
|
__snake_case :Any = '''Tobias Carryer'''
from time import time
class _A :
    def __init__( self : str , multiplier : List[Any] , increment : Tuple , modulo : Tuple , seed : int=int(time())): # noqa: B008
        '''simple docstring'''
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed
    def _lowerCamelCase ( self : Dict):
        '''simple docstring'''
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
# Show the LCG in action.
__snake_case :str = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
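# Note: the constants passed above (a=1664525, c=1013904223, m=2 << 31 == 2**32)
# are the "Numerical Recipes" LCG parameters. They satisfy the Hull-Dobell
# conditions (c is coprime to m; a - 1 is divisible by every prime factor of m,
# and by 4 since m is a multiple of 4), so the generator attains the full period m.
# With seed s, the first output is (1664525 * s + 1013904223) % 2**32.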
| 49
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _A :
UpperCamelCase__ : Optional[Union[str, Path]] = None
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = False
UpperCamelCase__ : Optional[Dict] = None
UpperCamelCase__ : Optional[str] = None
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = True
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : int = 1
UpperCamelCase__ : Optional[Union[str, bool]] = None
UpperCamelCase__ : bool = False
UpperCamelCase__ : Optional[Dict] = None
UpperCamelCase__ : Optional[str] = None
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
| 49
| 1
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def __snake_case ( sequences , padding_value , padding_side , sequence_length ):
    if isinstance(padding_value , tuple ):
        out_tensor = np.full((len(sequences ), sequence_length, 2) , padding_value )
    else:
        out_tensor = np.full((len(sequences ), sequence_length) , padding_value )
    for i, tensor in enumerate(sequences ):
        if padding_side == "right":
            if isinstance(padding_value , tuple ):
                out_tensor[i, : len(tensor[:sequence_length] ), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length] )] = tensor[:sequence_length]
        else:
            if isinstance(padding_value , tuple ):
                out_tensor[i, -len(tensor[:sequence_length] ) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, -len(tensor[:sequence_length] ) :] = tensor[:sequence_length]
    return out_tensor.tolist()
def __snake_case ( _UpperCAmelCase ):
    cp = ord(_UpperCAmelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
    cat = unicodedata.category(_UpperCAmelCase )
if cat.startswith('''P''' ):
return True
return False
@dataclass
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : PreTrainedTokenizerBase
UpperCamelCase__ : Union[bool, str, PaddingStrategy] = True
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : int = -100
UpperCamelCase__ : str = "pt"
    def _lowerCamelCase ( self : Optional[Any] , features : Optional[Any]):
        '''simple docstring'''
        import torch
        label_name = '''label''' if '''label''' in features[0].keys() else '''labels'''
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch['''entity_ids''']).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature['''ner_tags'''] for feature in features]
        batch['''ner_tags'''] = padding_tensor(ner_tags , -1 , padding_side , sequence_length)
        original_entity_spans = [feature['''original_entity_spans'''] for feature in features]
        batch['''original_entity_spans'''] = padding_tensor(original_entity_spans , (-1, -1) , padding_side , sequence_length)
        batch = {k: torch.tensor(v , dtype=torch.int64) for k, v in batch.items()}
        return batch
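# A hedged usage sketch for the collator above (defined here as `_A`; it mirrors
# the entity-aware token-classification collator from the LUKE examples). The
# tokenizer and the feature dicts are placeholders, not defined in this file:
#
# collator = _A(tokenizer=my_luke_tokenizer, padding=True, label_pad_token_id=-100)
# batch = collator(features)
# # Each feature dict is expected to carry input_ids, entity_ids, labels,
# # ner_tags and original_entity_spans; the last three are padded to the
# # batch-wide entity sequence length and returned as int64 tensors.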
| 49
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :Union[str, Any] = logging.get_logger(__name__)
__snake_case :Any = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[int] = '''switch_transformers'''
UpperCamelCase__ : Optional[Any] = ['''past_key_values''']
UpperCamelCase__ : Optional[Any] = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str=32_128 , __SCREAMING_SNAKE_CASE : int=768 , __SCREAMING_SNAKE_CASE : Any=64 , __SCREAMING_SNAKE_CASE : Optional[int]=2_048 , __SCREAMING_SNAKE_CASE : List[str]=64 , __SCREAMING_SNAKE_CASE : int=12 , __SCREAMING_SNAKE_CASE : Any=3 , __SCREAMING_SNAKE_CASE : Optional[Any]=12 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : Tuple=8 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.01 , __SCREAMING_SNAKE_CASE : Dict="float32" , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : int=128 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : int=1E-6 , __SCREAMING_SNAKE_CASE : Dict=0.0_01 , __SCREAMING_SNAKE_CASE : List[str]=0.0_01 , __SCREAMING_SNAKE_CASE : List[Any]=1.0 , __SCREAMING_SNAKE_CASE : Optional[int]="relu" , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : int=0 , __SCREAMING_SNAKE_CASE : List[Any]=1 , **__SCREAMING_SNAKE_CASE : Dict , ):
'''simple docstring'''
__a = vocab_size
__a = d_model
__a = d_kv
__a = d_ff
__a = num_sparse_encoder_layers
__a = num_layers
__a = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__a = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
__a = self.num_layers // self.num_sparse_encoder_layers
else:
__a = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
__a = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__a = self.num_decoder_layers # HACK: this will create 0 sparse layers
__a = num_heads
__a = num_experts
__a = expert_capacity
__a = router_bias
__a = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}')
__a = router_dtype
__a = router_ignore_padding_tokens
__a = relative_attention_num_buckets
__a = relative_attention_max_distance
__a = dropout_rate
__a = layer_norm_epsilon
__a = initializer_factor
__a = feed_forward_proj
__a = use_cache
__a = add_router_probs
__a = router_z_loss_coef
__a = router_aux_loss_coef
        act_info = self.feed_forward_proj.split('''-''')
__a = act_info[-1]
__a = act_info[0] == '''gated'''
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
raise ValueError(
F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''')
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__a = '''gelu_new'''
super().__init__(
pad_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
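# Worked example of the sparse-layer spacing computed in __init__ above: with the
# defaults num_layers=12 and num_sparse_encoder_layers=3 the encoder step is
# 12 // 3 == 4, i.e. every 4th encoder layer is a sparse mixture-of-experts layer
# (in transformers this value is stored as encoder_sparse_step). Setting
# num_sparse_encoder_layers=0 makes the step equal num_layers, which yields no
# sparse layers at all.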
| 49
| 1
|
import math
def __snake_case ( n ):
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(n**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , n , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def __snake_case ( limit = 999966663333 ):
    primes_upper_bound = math.floor(math.sqrt(limit ) ) + 100
    primes = prime_sieve(primes_upper_bound )
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
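# Quick sanity check for the sieve above (the first function, referenced in the
# solution as prime_sieve): prime_sieve(20) should return [2, 3, 5, 7, 11, 13, 17, 19].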
| 49
|
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
__snake_case :List[Any] = logging.getLogger(__name__)
class _A :
def __init__( self : List[str]):
'''simple docstring'''
        self.initialized = False
    def _lowerCamelCase ( self : Any , config : Optional[Any] , question_encoder_tokenizer : Optional[Any] , generator_tokenizer : List[str] , index : Union[str, Any]):
        '''simple docstring'''
        if not self.initialized:
            self.retriever = RagRetriever(
                config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
            self.initialized = True
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
self.retriever.index.init_index()
    def _lowerCamelCase ( self : Tuple , question_hidden_states : Union[str, Any] , n_docs : Union[str, Any]):
        '''simple docstring'''
        doc_ids , retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states , n_docs)
        return doc_ids, retrieved_doc_embeds
class _A ( __UpperCAmelCase ):
    def __init__( self : Optional[Any] , config : List[Any] , question_encoder_tokenizer : List[str] , generator_tokenizer : List[str] , retrieval_workers : int , index : Union[str, Any]=None):
        '''simple docstring'''
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                '''When using Ray for distributed fine-tuning, '''
                '''you\'ll need to provide the paths instead, '''
                '''as the dataset and the index are loaded '''
                '''separately. More info in examples/rag/use_own_knowledge_dataset.py ''')
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config , question_encoder_tokenizer , generator_tokenizer , index)
                    for worker in self.retrieval_workers
                ])
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
logger.info('''initializing retrieval''')
if len(self.retrieval_workers) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
    def _lowerCamelCase ( self : Dict , question_hidden_states : Optional[int] , n_docs : List[Any]):
        '''simple docstring'''
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers) - 1)]
            doc_ids , retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states , n_docs))
        else:
            doc_ids , retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
@classmethod
    def _lowerCamelCase ( cls : Any , retriever_name_or_path : Tuple , indexed_dataset : Tuple=None , **kwargs : Optional[int]):
        '''simple docstring'''
        return super(_A , cls).get_tokenizers(retriever_name_or_path , indexed_dataset , **kwargs)
@classmethod
    def _lowerCamelCase ( cls : Tuple , retriever_name_or_path : Optional[int] , actor_handles : str , indexed_dataset : str=None , **kwargs : List[Any]):
        '''simple docstring'''
        config = kwargs.pop('''config''' , None) or RagConfig.from_pretrained(retriever_name_or_path , **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path , config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = '''custom'''
            index = CustomHFIndex(config.retrieval_vector_size , indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , retrieval_workers=actor_handles , index=index , )
| 49
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case :int = logging.get_logger(__name__)
__snake_case :Tuple = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Union[str, Any] = '''data2vec-text'''
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any]=30_522 , __SCREAMING_SNAKE_CASE : Dict=768 , __SCREAMING_SNAKE_CASE : Union[str, Any]=12 , __SCREAMING_SNAKE_CASE : Optional[int]=12 , __SCREAMING_SNAKE_CASE : str=3_072 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Tuple=512 , __SCREAMING_SNAKE_CASE : Dict=2 , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : int=1E-12 , __SCREAMING_SNAKE_CASE : Tuple=1 , __SCREAMING_SNAKE_CASE : Tuple=0 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Dict="absolute" , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : List[str]=None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = position_embedding_type
__a = use_cache
__a = classifier_dropout
class _A ( __UpperCAmelCase ):
@property
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
])
| 49
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def __snake_case ( tf_checkpoint_path , big_bird_config_file , pytorch_dump_path , is_trivia_qa ):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config )
    else:
        model = BigBirdForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model , tf_checkpoint_path , is_trivia_qa=is_trivia_qa )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
__snake_case :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--big_bird_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
)
__snake_case :Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 49
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__snake_case :Dict = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Any = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Dict = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
__snake_case :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 49
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :] = tensor[:sequence_length]
    return out_tensor.tolist()
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"
    def torch_call(self, features):
        import torch
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors would fail here because the labels are not all the same length yet.
            return_tensors="pt" if labels is None else None,
        )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
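# Hypothetical usage sketch for the collator above. The feature dicts are made up for
# illustration; real features come from a LUKE-style entity NER dataset and a tokenizer
# that produces "entity_ids":
# collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer, padding="longest")
# batch = collator([
#     {"input_ids": [0, 8, 2], "entity_ids": [1, 2], "labels": [5, 5],
#      "ner_tags": [5, 5], "original_entity_spans": [(0, 1), (1, 3)]},
# ])
# batch["ner_tags"] and batch["original_entity_spans"] are padded with -1 / (-1, -1).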
| 49
| 1
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def __snake_case ( ):
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])
    result = mst(adjacency)
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
assert edge in result or reverse in result
| 49
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def __snake_case ( ):
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])
    result = mst(adjacency)
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
assert edge in result or reverse in result
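# The imported `prisms_algorithm` is assumed to return MST edges as (node, node)
# pairs, roughly like this heap-based sketch (cost bookkeeping and tie-breaking
# details may differ in the real module):
import heapq

def prim_mst_sketch(adjacency, start=0):
    visited = {start}
    heap = [(cost, start, to) for to, cost in adjacency[start]]
    heapq.heapify(heap)
    mst_edges = []
    while heap:
        cost, frm, to = heapq.heappop(heap)
        if to in visited:
            continue
        visited.add(to)
        mst_edges.append((frm, to))
        for nxt, c in adjacency[to]:
            if nxt not in visited:
                heapq.heappush(heap, (c, to, nxt))
    return mst_edges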
| 49
| 1
|
def is_palindrome(num):
    return str(num) == str(num)[::-1]
def sum_reverse(num):
    return int(num) + int(str(num)[::-1])
def solution(limit=10000):
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(f'{solution() = }')
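# Worked example of the reverse-and-add process: 47 + 74 = 121, a palindrome, so 47
# resolves in a single iteration; numbers that never reach a palindrome within 50
# iterations (196 is the most famous candidate) are counted as Lychrel numbers.
assert sum_reverse(47) == 121 and is_palindrome(121)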
| 49
|
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
__a = '''fp16'''
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
__a = '''fp16'''
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
__a = '''fp16'''
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
__a = '''fp16'''
self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
__a = '''fp16'''
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
__a = '''fp16'''
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
__a = '''fp16'''
self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
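# One way such a check could be implemented, consistent with the tests above (the real
# diffusers implementation differs in details): every torch ".bin" weight file for the
# requested variant must have a ".safetensors" counterpart.
def is_safetensors_compatible_sketch(filenames, variant=None):
    suffix = f".{variant}" if variant is not None else ""
    expected = set()
    for f in filenames:
        if f.endswith(f"{suffix}.bin"):
            # "pytorch_model" maps to "model"; diffusion weight files keep their stem
            sf = f if "diffusion" in f else f.replace("pytorch_model", "model")
            expected.add(sf[: -len(".bin")] + ".safetensors")
    return expected.issubset(set(filenames))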
| 49
| 1
|
def counting_sort(collection):
    # if the collection is empty, return empty
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how many times each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
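# Because positions are offset by coll_min, the sort also handles negative inputs:
# counting_sort([-5, -10, 0, -3, 8, 5, -1, 10]) == [-10, -5, -3, -1, 0, 5, 8, 10]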
| 49
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=_UpperCAmelCase )
def __snake_case ( ):
if LOAD_DENSE_INDEX:
__a = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
__a = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
__a = qar_model.eval()
else:
__a , __a = (None, None)
if MODEL_TYPE == "bart":
__a = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
        __a = AutoModelForSeq2SeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
__a = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
__a = sas_model.eval()
else:
__a , __a = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_UpperCAmelCase )
def __snake_case ( ):
if LOAD_DENSE_INDEX:
__a = faiss.StandardGpuResources()
__a = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
__a = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
__a = faiss.IndexFlatIP(128 )
__a = faiss.index_cpu_to_gpu(_UpperCAmelCase , 1 , _UpperCAmelCase )
wikiaab_gpu_index_flat.add(_UpperCAmelCase ) # TODO fix for larger GPU
else:
__a , __a = (None, None)
__a = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_UpperCAmelCase )
def __snake_case ( ):
__a = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
__a = elia['''train_eli5''']
__a = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
__a = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_UpperCAmelCase )
return (elia_train, eli5_train_q_index)
__snake_case ,__snake_case ,__snake_case :List[str] = load_indexes()
__snake_case ,__snake_case ,__snake_case ,__snake_case :Dict = load_models()
__snake_case ,__snake_case :Tuple = load_train_data()
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=10 ):
__a = embed_questions_for_retrieval([question] , _UpperCAmelCase , _UpperCAmelCase )
__a , __a = eli5_train_q_index.search(_UpperCAmelCase , _UpperCAmelCase )
__a = [elia_train[int(_UpperCAmelCase )] for i in I[0]]
return nn_examples
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase="wiki40b" , _UpperCAmelCase="dense" , _UpperCAmelCase=10 ):
if source == "none":
__a , __a = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__a , __a = query_qa_dense_index(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
__a , __a = query_es_index(
_UpperCAmelCase , _UpperCAmelCase , index_name='''english_wiki40b_snippets_100w''' , n_results=_UpperCAmelCase , )
__a = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
__a = '''question: {} context: {}'''.format(_UpperCAmelCase , _UpperCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _UpperCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _UpperCAmelCase : None),
} )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=64 , _UpperCAmelCase=256 , _UpperCAmelCase=False , _UpperCAmelCase=2 , _UpperCAmelCase=0.95 , _UpperCAmelCase=0.8 ):
with torch.no_grad():
__a = qa_sas_generate(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , num_answers=1 , num_beams=_UpperCAmelCase , min_len=_UpperCAmelCase , max_len=_UpperCAmelCase , do_sample=_UpperCAmelCase , temp=_UpperCAmelCase , top_p=_UpperCAmelCase , top_k=_UpperCAmelCase , max_input_length=1024 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
__snake_case :Dict = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
__snake_case :int = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__snake_case :int = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
__snake_case :Union[str, Any] = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
__snake_case :int = st.sidebar.checkbox('''Demo options''')
if demo_options:
__snake_case :str = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
__snake_case :Tuple = action_list.index(action_st)
__snake_case :Optional[int] = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
__snake_case :Dict = show_type == '''Show full text of passages'''
else:
__snake_case :Dict = 3
__snake_case :str = True
__snake_case :Optional[Any] = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
__snake_case :List[str] = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
__snake_case :Dict = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
__snake_case :Optional[int] = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
__snake_case :Optional[int] = '''wiki40b'''
__snake_case :Dict = '''dense'''
__snake_case :Dict = '''beam'''
__snake_case :int = 2
__snake_case :str = 64
__snake_case :Tuple = 256
__snake_case :int = None
__snake_case :List[Any] = None
__snake_case :int = st.sidebar.checkbox('''Generation options''')
if generate_options:
__snake_case :Tuple = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
__snake_case :Tuple = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
__snake_case :Dict = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__snake_case :Dict = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__snake_case :List[str] = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__snake_case :Tuple = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
__snake_case :Any = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
__snake_case :Any = None
# start main text
__snake_case :Dict = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
__snake_case :int = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__snake_case :Optional[int] = st.text_input('''Enter your question here:''', '''''')
else:
__snake_case :Optional[int] = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
__snake_case ,__snake_case :int = make_support(question, source=wiki_source, method='''dense''', n_results=10)
__snake_case ,__snake_case :Optional[int] = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
__snake_case :Optional[Any] = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__snake_case :Union[str, Any] = support_list[:10]
__snake_case :Optional[int] = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
__snake_case ,__snake_case :Tuple = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__snake_case ,__snake_case :Optional[int] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
__snake_case :Dict = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
__snake_case :int = res[1].strip()
if sec_titles == "":
__snake_case :List[Any] = '''[{}]({})'''.format(res[0], wiki_url)
else:
__snake_case :Optional[int] = sec_titles.split(''' & ''')
__snake_case :str = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
__snake_case :str = find_nearest_training(question)
__snake_case :str = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
__snake_case :Optional[Any] = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
__snake_case :Tuple = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
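# The dense path above boils down to: embed the question, run an inner-product search
# on the faiss index, then look up the matching rows. A commented sketch of that step
# (variable names here are illustrative, not the app's exact API):
# q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
# scores, ids = gpu_index.search(q_rep.astype("float32"), n_results)
# passages = [snippets[int(i)] for i in ids[0]]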
| 49
| 1
|
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__snake_case :str = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_0522, type=int)
    args = parser.parse_args()
logger.info(f'Loading data from {args.data_file}')
with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)
    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
logger.info(f'Dump to {args.token_counts_dump}')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
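# A hypothetical consumer of these counts: smooth them into MLM masking probabilities,
# e.g. p_i proportional to count_i ** -0.7 so that rare tokens are masked more often
# (the exponent is an assumption, not part of this script):
# import numpy as np
# token_probs = np.maximum(np.array(counts, dtype=np.float64), 1.0) ** -0.7
# token_probs /= token_probs.sum()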
| 49
|
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _A ( __UpperCAmelCase ):
def __init__( self : List[Any] , *__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , **__SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = eval_examples
__a = post_process_function
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Dataset] = None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Optional[List[str]] = None , __SCREAMING_SNAKE_CASE : str = "eval" , **__SCREAMING_SNAKE_CASE : Any , ):
'''simple docstring'''
__a = gen_kwargs.copy()
__a = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''') is not None else self.args.generation_max_length
)
__a = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''') is not None else self.args.generation_num_beams
)
__a = gen_kwargs
__a = self.eval_dataset if eval_dataset is None else eval_dataset
__a = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE)
__a = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__a = self.compute_metrics
__a = None
__a = time.time()
__a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__a = eval_loop(
__SCREAMING_SNAKE_CASE , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
finally:
__a = compute_metrics
__a = self.args.eval_batch_size * self.args.world_size
if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__a = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = self.compute_metrics(__SCREAMING_SNAKE_CASE)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F'{metric_key_prefix}_'):
__a = metrics.pop(__SCREAMING_SNAKE_CASE)
metrics.update(output.metrics)
else:
__a = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(__SCREAMING_SNAKE_CASE)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
__a = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE)
return metrics
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : str = "test" , **__SCREAMING_SNAKE_CASE : Dict):
'''simple docstring'''
__a = gen_kwargs.copy()
__a = self.get_test_dataloader(__SCREAMING_SNAKE_CASE)
# Temporarily disable metric computation, we will do it in the loop here.
__a = self.compute_metrics
__a = None
__a = time.time()
__a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__a = eval_loop(
__SCREAMING_SNAKE_CASE , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
finally:
__a = compute_metrics
__a = self.args.eval_batch_size * self.args.world_size
if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is None or self.compute_metrics is None:
return output
__a = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''predict''')
__a = self.compute_metrics(__SCREAMING_SNAKE_CASE)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F'{metric_key_prefix}_'):
__a = metrics.pop(__SCREAMING_SNAKE_CASE)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE)
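# Hypothetical usage of the trainer subclass above: generation kwargs passed to
# evaluate()/predict() flow into the prediction loop, falling back to
# args.generation_max_length / args.generation_num_beams when absent:
# metrics = trainer.evaluate(max_length=128, num_beams=4, metric_key_prefix="eval")
# preds = trainer.predict(test_dataset, test_examples, max_length=128, metric_key_prefix="test")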
| 49
| 1
|
import os
from typing import Dict, List, Tuple, TypeVar, Union
__snake_case :Tuple = TypeVar('''T''')
__snake_case :int = Union[List[T], Tuple[T, ...]]
__snake_case :Union[str, Any] = Union[T, List[T], Dict[str, T]]
__snake_case :str = Union[str, bytes, os.PathLike]
| 49
|
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation):
    if not postfix_notation:
        return 0
    operations = {"+", "-", "*", "/"}
    stack = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
            stack.append(int(token))
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
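# Worked trace for evaluate_postfix(["2", "1", "+", "3", "*"]):
#   push 2, push 1; "+" pops b=1, a=2 and pushes 3; push 3; "*" pushes 9.
# The sign check implements truncation toward zero for "/": for ["7", "-2", "/"],
# a // b is -4, and since a * b < 0 with a nonzero remainder, 1 is added, giving -3.
assert evaluate_postfix(["2", "1", "+", "3", "*"]) == 9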
| 49
| 1
|
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
__snake_case :Union[str, Any] = get_logger(__name__)
class _A :
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[str] = None):
'''simple docstring'''
__a = (
os.path.join(__SCREAMING_SNAKE_CASE , config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
__a = Extractor
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
__a = os.path.abspath(__SCREAMING_SNAKE_CASE)
return os.path.join(self.extract_dir , hash_url_to_filename(__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : bool):
'''simple docstring'''
return force_extract or (
not os.path.isfile(__SCREAMING_SNAKE_CASE) and not (os.path.isdir(__SCREAMING_SNAKE_CASE) and os.listdir(__SCREAMING_SNAKE_CASE))
)
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : bool = False):
'''simple docstring'''
__a = self.extractor.infer_extractor_format(__SCREAMING_SNAKE_CASE)
if not extractor_format:
return input_path
__a = self._get_output_path(__SCREAMING_SNAKE_CASE)
if self._do_extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
self.extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
return output_path
class _A ( __UpperCAmelCase ):
@classmethod
@abstractmethod
def _lowerCamelCase ( cls : Dict , __SCREAMING_SNAKE_CASE : Union[Path, str] , **__SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
...
@staticmethod
@abstractmethod
def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : Union[Path, str]):
'''simple docstring'''
...
class _A ( __UpperCAmelCase ,__UpperCAmelCase ):
UpperCamelCase__ : List[bytes] = []
@staticmethod
def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
with open(__SCREAMING_SNAKE_CASE , '''rb''') as f:
return f.read(__SCREAMING_SNAKE_CASE)
@classmethod
def _lowerCamelCase ( cls : Any , __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : bytes = b""):
'''simple docstring'''
if not magic_number:
__a = max(len(__SCREAMING_SNAKE_CASE) for cls_magic_number in cls.magic_numbers)
try:
__a = cls.read_magic_number(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
except OSError:
return False
return any(magic_number.startswith(__SCREAMING_SNAKE_CASE) for cls_magic_number in cls.magic_numbers)
class _A ( __UpperCAmelCase ):
@classmethod
def _lowerCamelCase ( cls : Optional[int] , __SCREAMING_SNAKE_CASE : Union[Path, str] , **__SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
return tarfile.is_tarfile(__SCREAMING_SNAKE_CASE)
@staticmethod
def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
def resolved(__SCREAMING_SNAKE_CASE : str) -> str:
return os.path.realpath(os.path.abspath(__SCREAMING_SNAKE_CASE))
def badpath(__SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)).startswith(__SCREAMING_SNAKE_CASE)
def badlink(__SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str) -> bool:
# Links are interpreted relative to the directory containing the link
__a = resolved(os.path.join(__SCREAMING_SNAKE_CASE , os.path.dirname(info.name)))
return badpath(info.linkname , base=__SCREAMING_SNAKE_CASE)
__a = resolved(__SCREAMING_SNAKE_CASE)
for finfo in members:
if badpath(finfo.name , __SCREAMING_SNAKE_CASE):
logger.error(F'Extraction of {finfo.name} is blocked (illegal path)')
elif finfo.issym() and badlink(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
logger.error(F'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}')
elif finfo.islnk() and badlink(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
logger.error(F'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}')
else:
yield finfo
@staticmethod
def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : Union[Path, str]):
'''simple docstring'''
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE)
__a = tarfile.open(__SCREAMING_SNAKE_CASE)
tar_file.extractall(__SCREAMING_SNAKE_CASE , members=TarExtractor.safemembers(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
tar_file.close()
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Dict = [B'''\x1F\x8B''']
@staticmethod
def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : Union[Path, str]):
'''simple docstring'''
with gzip.open(__SCREAMING_SNAKE_CASE , '''rb''') as gzip_file:
with open(__SCREAMING_SNAKE_CASE , '''wb''') as extracted_file:
shutil.copyfileobj(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Dict = [
B'''PK\x03\x04''',
B'''PK\x05\x06''', # empty archive
B'''PK\x07\x08''', # spanned archive
]
@classmethod
def _lowerCamelCase ( cls : Tuple , __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : bytes = b""):
'''simple docstring'''
if super().is_extractable(__SCREAMING_SNAKE_CASE , magic_number=__SCREAMING_SNAKE_CASE):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(__SCREAMING_SNAKE_CASE , '''rb''') as fp:
__a = _EndRecData(__SCREAMING_SNAKE_CASE)
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET]) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
__a = fp.read(__SCREAMING_SNAKE_CASE) # CD is where we expect it to be
if len(__SCREAMING_SNAKE_CASE) == sizeCentralDir:
__a = struct.unpack(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : Union[Path, str]):
'''simple docstring'''
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE)
with zipfile.ZipFile(__SCREAMING_SNAKE_CASE , '''r''') as zip_file:
zip_file.extractall(__SCREAMING_SNAKE_CASE)
zip_file.close()
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[Any] = [B'''\xFD\x37\x7A\x58\x5A\x00''']
@staticmethod
def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : Union[Path, str]):
'''simple docstring'''
with lzma.open(__SCREAMING_SNAKE_CASE) as compressed_file:
with open(__SCREAMING_SNAKE_CASE , '''wb''') as extracted_file:
shutil.copyfileobj(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : int = [B'''Rar!\x1a\x07\x00''', B'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID
@staticmethod
def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : Union[Path, str]):
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''')
import rarfile
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE)
__a = rarfile.RarFile(__SCREAMING_SNAKE_CASE)
rf.extractall(__SCREAMING_SNAKE_CASE)
rf.close()
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : List[Any] = [B'''\x28\xb5\x2F\xFD''']
@staticmethod
def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : Union[Path, str]):
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('''Please pip install zstandard''')
import zstandard as zstd
__a = zstd.ZstdDecompressor()
with open(__SCREAMING_SNAKE_CASE , '''rb''') as ifh, open(__SCREAMING_SNAKE_CASE , '''wb''') as ofh:
dctx.copy_stream(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : str = [B'''\x42\x5A\x68''']
@staticmethod
def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : Union[Path, str]):
'''simple docstring'''
        with bz2.open(__SCREAMING_SNAKE_CASE , '''rb''') as compressed_file:
with open(__SCREAMING_SNAKE_CASE , '''wb''') as extracted_file:
shutil.copyfileobj(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Union[str, Any] = [B'''\x37\x7A\xBC\xAF\x27\x1C''']
@staticmethod
def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : Union[Path, str]):
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError('''Please pip install py7zr''')
        import py7zr
        os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE)
        with py7zr.SevenZipFile(__SCREAMING_SNAKE_CASE , '''r''') as archive:
archive.extractall(__SCREAMING_SNAKE_CASE)
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : int = [B'''\x04\x22\x4D\x18''']
@staticmethod
def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : Union[Path, str]):
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError('''Please pip install lz4''')
        import lz4.frame
        with lz4.frame.open(__SCREAMING_SNAKE_CASE , '''rb''') as compressed_file:
with open(__SCREAMING_SNAKE_CASE , '''wb''') as extracted_file:
shutil.copyfileobj(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
class _A :
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
UpperCamelCase__ : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def _lowerCamelCase ( cls : int):
'''simple docstring'''
return max(
len(__SCREAMING_SNAKE_CASE)
for extractor in cls.extractors.values()
if issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
for extractor_magic_number in extractor.magic_numbers)
@staticmethod
def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(__SCREAMING_SNAKE_CASE , magic_number_length=__SCREAMING_SNAKE_CASE)
except OSError:
return b""
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : bool = False):
'''simple docstring'''
warnings.warn(
'''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'infer_extractor_format\' instead.''' , category=__SCREAMING_SNAKE_CASE , )
__a = cls.infer_extractor_format(__SCREAMING_SNAKE_CASE)
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def _lowerCamelCase ( cls : Any , __SCREAMING_SNAKE_CASE : Union[Path, str]): # <Added version="2.4.0"/>
'''simple docstring'''
__a = cls._get_magic_number_max_length()
__a = cls._read_magic_number(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(__SCREAMING_SNAKE_CASE , magic_number=__SCREAMING_SNAKE_CASE):
return extractor_format
@classmethod
def _lowerCamelCase ( cls : List[str] , __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : Union[Path, str] , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : Optional[BaseExtractor] = "deprecated" , ):
'''simple docstring'''
os.makedirs(os.path.dirname(__SCREAMING_SNAKE_CASE) , exist_ok=__SCREAMING_SNAKE_CASE)
# Prevent parallel extractions
__a = str(Path(__SCREAMING_SNAKE_CASE).with_suffix('''.lock'''))
with FileLock(__SCREAMING_SNAKE_CASE):
shutil.rmtree(__SCREAMING_SNAKE_CASE , ignore_errors=__SCREAMING_SNAKE_CASE)
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): # passed as positional arg
warnings.warn(
'''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'extractor_format\' instead.''' , category=__SCREAMING_SNAKE_CASE , )
__a = extractor if extractor != '''deprecated''' else extractor_format
else:
__a = cls.extractors[extractor_format]
return extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
else:
warnings.warn(
'''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
'''exception in 3.0.0.''' , category=__SCREAMING_SNAKE_CASE , )
for extractor in cls.extractors.values():
if extractor.is_extractable(__SCREAMING_SNAKE_CASE):
return extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
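# Minimal sketch of the magic-number dispatch used above: read a short file prefix and
# match it against known signatures (subset shown; the real Extractor sizes the read
# from the longest registered magic number and also special-cases tar and zip):
_MAGIC_SKETCH = {"gzip": b"\x1f\x8b", "zip": b"PK\x03\x04", "bz2": b"BZh", "xz": b"\xfd7zXZ\x00"}

def infer_format_sketch(path):
    with open(path, "rb") as f:
        head = f.read(max(len(m) for m in _MAGIC_SKETCH.values()))
    for fmt, magic in _MAGIC_SKETCH.items():
        if head.startswith(magic):
            return fmt
    return None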
| 49
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__snake_case :Optional[int] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__snake_case :List[str] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__snake_case :List[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item, main_target):
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1, parent_2):
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child, genes):
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(parent_1, population_score, genes):
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new strings to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target, genes, debug=True):
    # Verify that N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside the genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'\nGeneration: {generation}'
                f'\nTotal Population: {total_population}'
                f'\nBest score: {population_score[0][1]}'
                f'\nBest string: {population_score[0][0]}' )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
    genes_list = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
    generation, population, target = basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
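# Quick illustration of the operators above: with a slice index of 2,
# crossover("AAAA", "BBBB") returns ("AABB", "BBAA") (the index is random in practice),
# and mutate() rewrites at most one position per call, with probability
# MUTATION_PROBABILITY, which keeps every gene reachable during evolution.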
| 49
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case :str = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Union[str, Any] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
__snake_case :List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 49
|
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}' )
    model = LxmertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 49
| 1
|
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__snake_case :Optional[int] = logging.getLogger(__name__)
__snake_case :Dict = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
__snake_case :Tuple = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _A :
UpperCamelCase__ : Optional[str] = field(
default=__UpperCAmelCase ,metadata={
'''help''': (
'''The model checkpoint for weights initialization. Leave None if you want to train a model from'''
''' scratch.'''
)
} ,)
UpperCamelCase__ : Optional[str] = field(
default=__UpperCAmelCase ,metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(__UpperCAmelCase )} ,)
UpperCamelCase__ : Optional[str] = field(
default=__UpperCAmelCase ,metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase__ : Optional[str] = field(
default=__UpperCAmelCase ,metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase__ : Optional[str] = field(
default=__UpperCAmelCase ,metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} ,)
@dataclass
class _A :
UpperCamelCase__ : Optional[str] = field(
default=__UpperCAmelCase ,metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCamelCase__ : Optional[str] = field(
default=__UpperCAmelCase ,metadata={
'''help''': (
'''The input training data files (multiple files in glob format). '''
'''Very often splitting large files to smaller files can prevent tokenizer going out of memory'''
)
} ,)
UpperCamelCase__ : Optional[str] = field(
default=__UpperCAmelCase ,metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} ,)
UpperCamelCase__ : Optional[str] = field(
default=__UpperCAmelCase ,metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} ,)
UpperCamelCase__ : Optional[str] = field(
default=__UpperCAmelCase ,metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} ,)
UpperCamelCase__ : bool = field(
default=__UpperCAmelCase ,metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} ,)
UpperCamelCase__ : bool = field(
default=__UpperCAmelCase ,metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} )
UpperCamelCase__ : bool = field(default=__UpperCAmelCase ,metadata={'''help''': '''Whether ot not to use whole word mask.'''} )
UpperCamelCase__ : float = field(
default=0.15 ,metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
UpperCamelCase__ : float = field(
default=1 / 6 ,metadata={
'''help''': (
'''Ratio of length of a span of masked tokens to surrounding context length for permutation language'''
''' modeling.'''
)
} ,)
UpperCamelCase__ : int = field(
default=5 ,metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} )
UpperCamelCase__ : int = field(
default=-1 ,metadata={
'''help''': (
'''Optional input sequence length after tokenization.'''
'''The training dataset will be truncated in block of this size for training.'''
'''Default to the model max input length for single sentence inputs (take into account special tokens).'''
)
} ,)
UpperCamelCase__ : bool = field(
default=__UpperCAmelCase ,metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = None , ):
def _dataset(_UpperCAmelCase , _UpperCAmelCase=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('''You need to set world whole masking and mlm to True for Chinese Whole Word Mask''' )
return LineByLineWithRefDataset(
tokenizer=_UpperCAmelCase , file_path=_UpperCAmelCase , block_size=args.block_size , ref_path=_UpperCAmelCase , )
return LineByLineTextDataset(tokenizer=_UpperCAmelCase , file_path=_UpperCAmelCase , block_size=args.block_size )
else:
return TextDataset(
tokenizer=_UpperCAmelCase , file_path=_UpperCAmelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=_UpperCAmelCase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(_UpperCAmelCase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def __snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__a , __a , __a = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , _UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__a = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__a = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__a = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
__a = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__a = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
            '''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
            ''' script, save it, and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
__a = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
__a = AutoModelWithLMHead.from_config(_UpperCAmelCase )
model.resize_token_embeddings(len(_UpperCAmelCase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            '''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
            ''' --mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
__a = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__a = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__a = (
get_dataset(_UpperCAmelCase , tokenizer=_UpperCAmelCase , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__a = (
get_dataset(_UpperCAmelCase , tokenizer=_UpperCAmelCase , evaluate=_UpperCAmelCase , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__a = DataCollatorForPermutationLanguageModeling(
tokenizer=_UpperCAmelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__a = DataCollatorForWholeWordMask(
tokenizer=_UpperCAmelCase , mlm_probability=data_args.mlm_probability )
else:
__a = DataCollatorForLanguageModeling(
tokenizer=_UpperCAmelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__a = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , data_collator=_UpperCAmelCase , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , prediction_loss_only=_UpperCAmelCase , )
# Training
if training_args.do_train:
__a = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=_UpperCAmelCase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__a = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__a = trainer.evaluate()
__a = math.exp(eval_output['''eval_loss'''] )
__a = {'''perplexity''': perplexity}
__a = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(_UpperCAmelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , _UpperCAmelCase , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(_UpperCAmelCase )
return results
def __snake_case ( _UpperCAmelCase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def __snake_case ( _UpperCAmelCase = "isbn/0140328726" ):
__a = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('''/''' ) != 1:
__a = f'{olid} is not a valid Open Library olid'
raise ValueError(_UpperCAmelCase )
return requests.get(f'https://openlibrary.org/{new_olid}.json' ).json()
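# Illustrative usage (requires network access):
#   get_openlibrary_data("isbn/0140328726")  -> dict of book metadata
#   get_openlibrary_data(author["key"])      -> author metadata; "authors" entries
#   carry keys of the form "/authors/<olid>", and the leading slash is stripped
#   above before the request is issued.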
def __snake_case ( _UpperCAmelCase ):
__a = {
'''title''': '''Title''',
'''publish_date''': '''Publish date''',
'''authors''': '''Authors''',
        '''number_of_pages''': '''Number of pages''',
'''first_sentence''': '''First sentence''',
'''isbn_10''': '''ISBN (10)''',
'''isbn_13''': '''ISBN (13)''',
}
__a = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
__a = [
get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
]
__a = data['''First sentence''']['''value''']
for key, value in data.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a = ''', '''.join(_UpperCAmelCase )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
__snake_case :List[Any] = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f'Sorry, {isbn} is not a valid ISBN. Please enter a valid ISBN.')
continue
print(f'\nSearching Open Library for ISBN: {isbn}...\n')
try:
__snake_case :Optional[Any] = summarize_book(get_openlibrary_data(f'isbn/{isbn}'))
print('''\n'''.join(f'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'Sorry, there are no results for ISBN: {isbn}.')
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# Initialise PyTorch model
__a = AlbertConfig.from_json_file(_UpperCAmelCase )
print(f'Building PyTorch model from configuration: {config}' )
__a = AlbertForPreTraining(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_albert(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , _UpperCAmelCase )
if __name__ == "__main__":
__snake_case :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__snake_case :List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _A ( __UpperCAmelCase ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : NestedDataStructureLike[PathLike] , __SCREAMING_SNAKE_CASE : Optional[NamedSplit] = None , __SCREAMING_SNAKE_CASE : Optional[Features] = None , __SCREAMING_SNAKE_CASE : str = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[int] = None , **__SCREAMING_SNAKE_CASE : List[str] , ):
'''simple docstring'''
super().__init__(
__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , streaming=__SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__a = path_or_paths if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else {self.split: path_or_paths}
__a = Text(
cache_dir=__SCREAMING_SNAKE_CASE , data_files=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
if self.streaming:
__a = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
__a = None
__a = None
__a = None
__a = None
self.builder.download_and_prepare(
download_config=__SCREAMING_SNAKE_CASE , download_mode=__SCREAMING_SNAKE_CASE , verification_mode=__SCREAMING_SNAKE_CASE , base_path=__SCREAMING_SNAKE_CASE , num_proc=self.num_proc , )
__a = self.builder.as_dataset(
split=self.split , verification_mode=__SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory)
return dataset
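# Illustrative usage (hypothetical file name): constructing the reader with a local
# text file and calling the method above yields a map-style Dataset, or an
# IterableDataset when the reader was built with streaming=True.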
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
__snake_case :Optional[Any] = '''src/transformers'''
__snake_case :List[Any] = '''docs/source/en/tasks'''
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
with open(_UpperCAmelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__a = f.readlines()
# Find the start prompt.
__a = 0
while not lines[start_index].startswith(_UpperCAmelCase ):
start_index += 1
start_index += 1
__a = start_index
while not lines[end_index].startswith(_UpperCAmelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
__snake_case :Dict = direct_transformers_import(TRANSFORMERS_PATH)
__snake_case :List[str] = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
__snake_case :Optional[int] = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def __snake_case ( _UpperCAmelCase ):
__a = TASK_GUIDE_TO_MODELS[task_guide]
__a = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(_UpperCAmelCase , set() )
__a = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([f'[{name}](../model_doc/{code})' for code, name in model_names.items()] ) + "\n"
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=False ):
__a , __a , __a , __a = _find_text_in_file(
filename=os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' , end_prompt='''<!--End of the generated tip-->''' , )
__a = get_model_list_for_task(_UpperCAmelCase )
if current_list != new_list:
if overwrite:
with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
''' to fix this.''' )
if __name__ == "__main__":
__snake_case :Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__snake_case :List[Any] = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__snake_case :List[str] = '''\
Text data.
Second line of data.'''
__snake_case :Optional[Any] = '''file'''
@pytest.fixture(scope='''session''' )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
__a = bytes(_UpperCAmelCase , '''utf-8''' )
with zstd.open(_UpperCAmelCase , '''wb''' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture
def __snake_case ( _UpperCAmelCase ):
with open(os.path.join(tmpfs.local_root_dir , _UpperCAmelCase ) , '''w''' ) as f:
f.write(_UpperCAmelCase )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
__a = input_paths[compression_format]
__a = tmp_path / '''cache'''
__a = DownloadConfig(cache_dir=_UpperCAmelCase , extract_compressed_file=_UpperCAmelCase )
__a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase )
with open(_UpperCAmelCase ) as f:
__a = f.read()
with open(_UpperCAmelCase ) as f:
__a = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = '''custom_cache'''
__a = '''custom_extracted_dir'''
__a = tmp_path / '''custom_extracted_path'''
if default_extracted:
__a = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _UpperCAmelCase )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_UpperCAmelCase ) )
__a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
__a = xz_file
__a = (
DownloadConfig(extract_compressed_file=_UpperCAmelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_UpperCAmelCase )
)
__a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase )
assert Path(_UpperCAmelCase ).parent.parts[-2:] == expected
def __snake_case ( _UpperCAmelCase ):
# absolute path
__a = str(Path(_UpperCAmelCase ).resolve() )
assert cached_path(_UpperCAmelCase ) == text_file
# relative path
__a = str(Path(_UpperCAmelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_UpperCAmelCase ) == text_file
def __snake_case ( _UpperCAmelCase ):
# absolute path
__a = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(_UpperCAmelCase ):
cached_path(_UpperCAmelCase )
# relative path
__a = '''./__missing_file__.txt'''
with pytest.raises(_UpperCAmelCase ):
cached_path(_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = get_from_cache(f'tmp://{tmpfs_file}' )
with open(_UpperCAmelCase ) as f:
__a = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( ):
with pytest.raises(_UpperCAmelCase ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
http_get('''https://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
ftp_get('''ftp://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
fsspec_get('''s3://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
fsspec_head('''s3://huggingface.co''' )
def __snake_case ( _UpperCAmelCase ):
return 10 - x * x
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
    # Bolzano's theorem: a continuous function has a root in [a, b] only if it
    # changes sign there, i.e. equation(a) * equation(b) < 0.
if equation(_UpperCAmelCase ) * equation(_UpperCAmelCase ) >= 0:
raise ValueError('''Wrong space!''' )
__a = a
while (b - a) >= 0.01:
# Find middle point
__a = (a + b) / 2
# Check if middle point is root
if equation(_UpperCAmelCase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(_UpperCAmelCase ) * equation(_UpperCAmelCase ) < 0:
__a = c
else:
__a = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
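# Worked example (illustrative): equation(x) = 10 - x * x changes sign on [-2, 5]
# (equation(-2) = 6, equation(5) = -15), so a root exists there; bisection halves
# the interval until b - a < 0.01, i.e. about log2(7 / 0.01) ~ 10 iterations,
# converging to ~3.162 (the positive root, sqrt(10)).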
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Tuple = (DDPMParallelScheduler,)
def _lowerCamelCase ( self : int , **__SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**__SCREAMING_SNAKE_CASE)
return config
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str):
'''simple docstring'''
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_09_79)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = len(__SCREAMING_SNAKE_CASE)
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = self.dummy_sample_deter + 0.1
__a = self.dummy_sample_deter - 0.1
__a = samplea.shape[0]
__a = torch.stack([samplea, samplea, samplea] , dim=0)
__a = torch.arange(__SCREAMING_SNAKE_CASE)[0:3, None].repeat(1 , __SCREAMING_SNAKE_CASE)
__a = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
__a = scheduler.batch_step_no_noise(__SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1) , samples.flatten(0 , 1))
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 11_53.18_33) < 1E-2
assert abs(result_mean.item() - 0.50_05) < 1E-3
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = len(__SCREAMING_SNAKE_CASE)
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0)
for t in reversed(range(__SCREAMING_SNAKE_CASE)):
# 1. predict noise residual
__a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 2_58.96_06) < 1E-2
assert abs(result_mean.item() - 0.33_72) < 1E-3
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(prediction_type='''v_prediction''')
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = len(__SCREAMING_SNAKE_CASE)
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0)
for t in reversed(range(__SCREAMING_SNAKE_CASE)):
# 1. predict noise residual
__a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 2_02.02_96) < 1E-2
assert abs(result_mean.item() - 0.26_31) < 1E-3
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
__a = scheduler.timesteps
for i, timestep in enumerate(__SCREAMING_SNAKE_CASE):
if i == len(__SCREAMING_SNAKE_CASE) - 1:
__a = -1
else:
__a = timesteps[i + 1]
__a = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE)
__a = prev_t.item()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [100, 87, 50, 51, 0]
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''`custom_timesteps` must be in descending order.'''):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [100, 87, 50, 1, 0]
__a = len(__SCREAMING_SNAKE_CASE)
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''):
scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            __SCREAMING_SNAKE_CASE , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class _A :
def __init__( self : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int]=13 , __SCREAMING_SNAKE_CASE : Union[str, Any]=7 , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Optional[int]=99 , __SCREAMING_SNAKE_CASE : List[str]=32 , __SCREAMING_SNAKE_CASE : Dict=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=4 , __SCREAMING_SNAKE_CASE : Any=37 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : Dict=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : int=0.02 , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : str="None" , __SCREAMING_SNAKE_CASE : str=3 , __SCREAMING_SNAKE_CASE : Optional[int]=4 , __SCREAMING_SNAKE_CASE : List[str]=None , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = relative_attention
__a = position_biased_input
__a = pos_att_type
__a = scope
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length])
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__a = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=__SCREAMING_SNAKE_CASE , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
__a = TFDebertaVaModel(config=__SCREAMING_SNAKE_CASE)
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__a = [input_ids, input_mask]
__a = model(__SCREAMING_SNAKE_CASE)
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict):
'''simple docstring'''
__a = TFDebertaVaForMaskedLM(config=__SCREAMING_SNAKE_CASE)
__a = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
__a = self.num_labels
__a = TFDebertaVaForSequenceClassification(config=__SCREAMING_SNAKE_CASE)
__a = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
__a = self.num_labels
__a = TFDebertaVaForTokenClassification(config=__SCREAMING_SNAKE_CASE)
__a = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
__a = TFDebertaVaForQuestionAnswering(config=__SCREAMING_SNAKE_CASE)
__a = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) = config_and_inputs
__a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class _A ( __UpperCAmelCase ,__UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : Dict = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase__ : Optional[int] = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase__ : int = False
UpperCamelCase__ : Optional[int] = False
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = TFDebertaVaModelTester(self)
__a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE)
@slow
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''')
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
@require_tf
class _A ( unittest.TestCase ):
@unittest.skip(reason='''Model not available yet''')
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
pass
@slow
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''')
__a = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
__a = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
__a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE)[0]
__a = tf.constant(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]])
tf.debugging.assert_near(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1E-4)
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__snake_case :List[Any] = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _A ( __UpperCAmelCase ):
def __init__( self : Dict , **__SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE)
requires_backends(self , '''vision''')
requires_backends(self , '''torch''')
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.')
self.check_model_type(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any , **__SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
__a = {}
__a = {}
__a = {}
# preprocess args
if "points_per_batch" in kwargs:
__a = kwargs['''points_per_batch''']
if "points_per_crop" in kwargs:
__a = kwargs['''points_per_crop''']
if "crops_n_layers" in kwargs:
__a = kwargs['''crops_n_layers''']
if "crop_overlap_ratio" in kwargs:
__a = kwargs['''crop_overlap_ratio''']
if "crop_n_points_downscale_factor" in kwargs:
__a = kwargs['''crop_n_points_downscale_factor''']
# postprocess args
if "pred_iou_thresh" in kwargs:
__a = kwargs['''pred_iou_thresh''']
if "stability_score_offset" in kwargs:
__a = kwargs['''stability_score_offset''']
if "mask_threshold" in kwargs:
__a = kwargs['''mask_threshold''']
if "stability_score_thresh" in kwargs:
__a = kwargs['''stability_score_thresh''']
if "crops_nms_thresh" in kwargs:
__a = kwargs['''crops_nms_thresh''']
if "output_rle_mask" in kwargs:
__a = kwargs['''output_rle_mask''']
if "output_bboxes_mask" in kwargs:
__a = kwargs['''output_bboxes_mask''']
return preprocess_kwargs, forward_params, postprocess_kwargs
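    # Illustrative routing (hypothetical call): pipeline(image, points_per_batch=64,
    # pred_iou_thresh=0.9) sends points_per_batch to the preprocess kwargs and
    # pred_iou_thresh to the postprocess kwargs via the mapping above.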
def __call__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , *__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Any=None , **__SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
return super().__call__(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , num_workers=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any=64 , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : float = 512 / 1_500 , __SCREAMING_SNAKE_CASE : Optional[int] = 32 , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , ):
'''simple docstring'''
__a = load_image(__SCREAMING_SNAKE_CASE)
__a = self.image_processor.size['''longest_edge''']
__a , __a , __a , __a = self.image_processor.generate_crop_boxes(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''')
with self.device_placement():
if self.framework == "pt":
__a = self.get_inference_context()
with inference_context():
__a = self._ensure_tensor_on_device(__SCREAMING_SNAKE_CASE , device=self.device)
__a = self.model.get_image_embeddings(model_inputs.pop('''pixel_values'''))
__a = image_embeddings
__a = grid_points.shape[1]
__a = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
            raise ValueError(
                '''Cannot have points_per_batch<=0. Must be >=1 to return batched outputs. '''
                '''To return all points at once, set points_per_batch to None''')
for i in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = grid_points[:, i : i + points_per_batch, :, :]
__a = input_labels[:, i : i + points_per_batch]
__a = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int=0.88 , __SCREAMING_SNAKE_CASE : List[Any]=0.95 , __SCREAMING_SNAKE_CASE : List[Any]=0 , __SCREAMING_SNAKE_CASE : int=1 , ):
'''simple docstring'''
__a = model_inputs.pop('''input_boxes''')
__a = model_inputs.pop('''is_last''')
__a = model_inputs.pop('''original_sizes''').tolist()
__a = model_inputs.pop('''reshaped_input_sizes''').tolist()
__a = self.model(**__SCREAMING_SNAKE_CASE)
        # Post-processing happens here to avoid CPU-GPU copies of ALL the masks
__a = model_outputs['''pred_masks''']
__a = self.image_processor.post_process_masks(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , binarize=__SCREAMING_SNAKE_CASE)
__a = model_outputs['''iou_scores''']
__a , __a , __a = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : int=0.7 , ):
'''simple docstring'''
__a = []
__a = []
__a = []
for model_output in model_outputs:
all_scores.append(model_output.pop('''iou_scores'''))
all_masks.extend(model_output.pop('''masks'''))
all_boxes.append(model_output.pop('''boxes'''))
__a = torch.cat(__SCREAMING_SNAKE_CASE)
__a = torch.cat(__SCREAMING_SNAKE_CASE)
__a , __a , __a , __a = self.image_processor.post_process_for_mask_generation(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = defaultdict(__SCREAMING_SNAKE_CASE)
for output in model_outputs:
for k, v in output.items():
extra[k].append(__SCREAMING_SNAKE_CASE)
__a = {}
if output_rle_mask:
__a = rle_mask
if output_bboxes_mask:
__a = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
from string import ascii_uppercase
__snake_case :str = {str(ord(c) - 55): c for c in ascii_uppercase}
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise TypeError('''int() can\'t convert non-string with explicit base''' )
if num < 0:
raise ValueError('''parameter must be positive int''' )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise TypeError('''\'str\' object cannot be interpreted as an integer''' )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
if base in (0, 1):
raise ValueError('''base must be >= 2''' )
if base > 36:
raise ValueError('''base must be <= 36''' )
__a = ''''''
__a = 0
__a = 0
while div != 1:
__a , __a = divmod(_UpperCAmelCase , _UpperCAmelCase )
if base >= 11 and 9 < mod < 36:
__a = ALPHABET_VALUES[str(_UpperCAmelCase )]
else:
__a = str(_UpperCAmelCase )
new_value += actual_value
__a = num // base
__a = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
new_value += str(_UpperCAmelCase )
return str(new_value[::-1] )
return new_value[::-1]
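# Worked example (illustrative): converting 255 to base 16 by repeated divmod:
#   divmod(255, 16) = (15, 15) -> 'F';  divmod(15, 16) = (0, 15) -> 'F'
# collecting remainders and reversing the digit string gives "FF".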
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__snake_case :str = logging.get_logger(__name__)
__snake_case :int = {'''vocab_file''': '''vocab.txt'''}
__snake_case :List[Any] = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
__snake_case :List[str] = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
__snake_case :Optional[int] = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[int] = VOCAB_FILES_NAMES
UpperCamelCase__ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : int = ConvBertTokenizer
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Optional[int]="[UNK]" , __SCREAMING_SNAKE_CASE : int="[SEP]" , __SCREAMING_SNAKE_CASE : List[Any]="[PAD]" , __SCREAMING_SNAKE_CASE : int="[CLS]" , __SCREAMING_SNAKE_CASE : Optional[int]="[MASK]" , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , **__SCREAMING_SNAKE_CASE : Dict , ):
'''simple docstring'''
super().__init__(
__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenize_chinese_chars=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__a = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get('''lowercase''' , __SCREAMING_SNAKE_CASE) != do_lower_case
or normalizer_state.get('''strip_accents''' , __SCREAMING_SNAKE_CASE) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , __SCREAMING_SNAKE_CASE) != tokenize_chinese_chars
):
__a = getattr(__SCREAMING_SNAKE_CASE , normalizer_state.pop('''type'''))
__a = do_lower_case
__a = strip_accents
__a = tokenize_chinese_chars
__a = normalizer_class(**__SCREAMING_SNAKE_CASE)
__a = do_lower_case
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any=None):
'''simple docstring'''
__a = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None):
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
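    # Illustrative layout (single-id tokens assumed):
    #   tokens:         [CLS] A A [SEP] B B [SEP]
    #   token_type_ids:   0   0 0   0   1 1   1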
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None):
'''simple docstring'''
__a = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE)
return tuple(__SCREAMING_SNAKE_CASE)
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _A ( __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ):
@register_to_config
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : bool = False , ):
'''simple docstring'''
super().__init__()
__a = nn.Embedding(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = nn.Embedding(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = False
__a = nn.Dropout(p=__SCREAMING_SNAKE_CASE)
__a = TaConfig(
vocab_size=__SCREAMING_SNAKE_CASE , d_model=__SCREAMING_SNAKE_CASE , num_heads=__SCREAMING_SNAKE_CASE , d_kv=__SCREAMING_SNAKE_CASE , d_ff=__SCREAMING_SNAKE_CASE , dropout_rate=__SCREAMING_SNAKE_CASE , feed_forward_proj=__SCREAMING_SNAKE_CASE , is_decoder=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , )
__a = nn.ModuleList()
for lyr_num in range(__SCREAMING_SNAKE_CASE):
__a = TaBlock(__SCREAMING_SNAKE_CASE)
self.encoders.append(__SCREAMING_SNAKE_CASE)
__a = TaLayerNorm(__SCREAMING_SNAKE_CASE)
__a = nn.Dropout(p=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
__a = self.token_embedder(__SCREAMING_SNAKE_CASE)
__a = encoder_input_tokens.shape[1]
__a = torch.arange(__SCREAMING_SNAKE_CASE , device=encoder_input_tokens.device)
x += self.position_encoding(__SCREAMING_SNAKE_CASE)
__a = self.dropout_pre(__SCREAMING_SNAKE_CASE)
        # invert the attention mask
__a = encoder_input_tokens.size()
__a = self.get_extended_attention_mask(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
for lyr in self.encoders:
__a = lyr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)[0]
__a = self.layer_norm(__SCREAMING_SNAKE_CASE)
return self.dropout_post(__SCREAMING_SNAKE_CASE), encoder_inputs_mask
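# Shape sketch (illustrative): encoder_input_tokens (batch, seq) is embedded to
# (batch, seq, d_model), position encodings are added, each T5 block is applied
# under the extended attention mask, and the layer-normed, dropout-applied hidden
# states are returned together with the original encoder input mask.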
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case :Any = logging.get_logger(__name__)
__snake_case :Optional[Any] = {
'''b0''': efficientnet.EfficientNetBa,
'''b1''': efficientnet.EfficientNetBa,
'''b2''': efficientnet.EfficientNetBa,
'''b3''': efficientnet.EfficientNetBa,
'''b4''': efficientnet.EfficientNetBa,
'''b5''': efficientnet.EfficientNetBa,
'''b6''': efficientnet.EfficientNetBa,
'''b7''': efficientnet.EfficientNetBa,
}
__snake_case :List[Any] = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def __snake_case ( _UpperCAmelCase ):
__a = EfficientNetConfig()
__a = CONFIG_MAP[model_name]['''hidden_dim''']
__a = CONFIG_MAP[model_name]['''width_coef''']
__a = CONFIG_MAP[model_name]['''depth_coef''']
__a = CONFIG_MAP[model_name]['''image_size''']
__a = CONFIG_MAP[model_name]['''dropout_rate''']
__a = CONFIG_MAP[model_name]['''dw_padding''']
__a = '''huggingface/label-files'''
__a = '''imagenet-1k-id2label.json'''
__a = 1000
__a = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
__a = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
return config
def __snake_case ( ):
__a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__a = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw )
return im
def __snake_case ( _UpperCAmelCase ):
__a = CONFIG_MAP[model_name]['''image_size''']
__a = EfficientNetImageProcessor(
size={'''height''': size, '''width''': size} , image_mean=[0.4_85, 0.4_56, 0.4_06] , image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] , do_center_crop=_UpperCAmelCase , )
return preprocessor
def __snake_case ( _UpperCAmelCase ):
__a = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
__a = sorted(set(_UpperCAmelCase ) )
__a = len(_UpperCAmelCase )
__a = {b: str(_UpperCAmelCase ) for b, i in zip(_UpperCAmelCase , range(_UpperCAmelCase ) )}
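    # Illustrative mapping: TF layer names such as "block1a_expand_conv" yield block
    # ids "1a", "2a", ... which are sorted and re-indexed to "0", "1", ... so that
    # they line up with the HF encoder.blocks indices used in the renames below.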
__a = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
__a = block_name_mapping[b]
rename_keys.append((f'block{b}_expand_conv/kernel:0', f'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
rename_keys.append((f'block{b}_expand_bn/gamma:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
rename_keys.append((f'block{b}_expand_bn/beta:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
rename_keys.append(
(f'block{b}_expand_bn/moving_mean:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
rename_keys.append(
(f'block{b}_expand_bn/moving_variance:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
rename_keys.append(
(f'block{b}_dwconv/depthwise_kernel:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
rename_keys.append((f'block{b}_bn/gamma:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
rename_keys.append((f'block{b}_bn/beta:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
rename_keys.append(
(f'block{b}_bn/moving_mean:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
rename_keys.append(
(f'block{b}_bn/moving_variance:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
rename_keys.append((f'block{b}_se_reduce/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
rename_keys.append((f'block{b}_se_reduce/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
rename_keys.append((f'block{b}_se_expand/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
rename_keys.append((f'block{b}_se_expand/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
rename_keys.append(
(f'block{b}_project_conv/kernel:0', f'encoder.blocks.{hf_b}.projection.project_conv.weight') )
rename_keys.append((f'block{b}_project_bn/gamma:0', f'encoder.blocks.{hf_b}.projection.project_bn.weight') )
rename_keys.append((f'block{b}_project_bn/beta:0', f'encoder.blocks.{hf_b}.projection.project_bn.bias') )
rename_keys.append(
(f'block{b}_project_bn/moving_mean:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
rename_keys.append(
(f'block{b}_project_bn/moving_variance:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
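# replace_params copies every mapped TF weight into the freshly initialized HF state dict
# in place, asserting shape agreement for each parameter.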
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            # TF conv kernels are (H, W, in, out); PyTorch expects (out, in, H, W)
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax", )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())
# Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")
# HF model inference
hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
print('''Model outputs match!''' )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        hub_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(hub_name)
        hf_model.push_to_hub(hub_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
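# Example invocation (script name and paths are illustrative):
#   python convert_efficientnet_to_pytorch.py --model_name b0 --pytorch_dump_folder_path hf_model --save_model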
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = inspect.getfile(accelerate.test_utils)
__a = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['''scripts''', '''test_script.py'''])
__a = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])
@require_tpu
def _lowerCamelCase ( self : str):
'''simple docstring'''
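        # Build the launch command: xla_spawn forks the test script across 8 TPU cores.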
__a = F'\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n '.split()
__a = [sys.executable] + distributed_args
execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=os.environ.copy())
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
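# Flag file names that break repository conventions: uppercase letters, spaces,
# hyphens, or files sitting at the repository root.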
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f'{len(upper_files)} files contain uppercase characters:')
print('''\n'''.join(upper_files) + '''\n''')
space_files = [file for file in filepaths if " " in file]
if space_files:
print(f'{len(space_files)} files contain space characters:')
print('''\n'''.join(space_files) + '''\n''')
hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
print(f'{len(hyphen_files)} files contain hyphen characters:')
print('''\n'''.join(hyphen_files) + '''\n''')
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f'{len(nodir_files)} files are not in a directory:')
print('''\n'''.join(nodir_files) + '''\n''')
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
def nor_gate(input_1, input_2):
    # NOR outputs 1 only when both inputs are 0
    return int(input_1 == input_2 == 0)
def main():
print('''Truth Table of NOR Gate:''' )
print('''| Input 1 | Input 2 | Output |''' )
print(f'| 0 | 0 | {nor_gate(0 , 0 )} |' )
print(f'| 0 | 1 | {nor_gate(0 , 1 )} |' )
print(f'| 1 | 0 | {nor_gate(1 , 0 )} |' )
print(f'| 1 | 1 | {nor_gate(1 , 1 )} |' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
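# Note: NOR is functionally complete; for example NOT a == nor_gate(a, a), so any
# Boolean function can be built from NOR gates alone.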
from collections import defaultdict
def check_anagrams(first_str, second_str):
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")
    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False
    # Default values for count should be 0
    count = defaultdict(int)
    # For each position, increment the count for the first string's character
    # and decrement it for the second string's character
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())
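# Runs in O(n) time with O(k) extra space (k = alphabet size), versus O(n log n)
# for the common sort-and-compare approach.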
if __name__ == "__main__":
from doctest import testmod
testmod()
__snake_case :Any = input('''Enter the first string ''').strip()
__snake_case :int = input('''Enter the second string ''').strip()
__snake_case :int = check_anagrams(input_a, input_b)
print(f'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : str = '''M-CLIP'''
    def __init__(self, transformerDimSize=1_024, imageDimSize=768, **kwargs):
        '''simple docstring'''
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[int] = MCLIPConfig
    def __init__(self, config, *args, **kwargs):
        '''simple docstring'''
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims)
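    # The forward pass mean-pools the token embeddings over the attention mask, then
    # projects the pooled vector into the multilingual CLIP embedding space.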
    def _lowerCamelCase(self, input_ids, attention_mask):
        '''simple docstring'''
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
import logging
from transformers.configuration_utils import PretrainedConfig
__snake_case :Any = logging.getLogger(__name__)
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[Any] = '''masked_bert'''
def __init__( self : str , __SCREAMING_SNAKE_CASE : int=30_522 , __SCREAMING_SNAKE_CASE : str=768 , __SCREAMING_SNAKE_CASE : List[str]=12 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3_072 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=1E-12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0 , __SCREAMING_SNAKE_CASE : List[str]="topK" , __SCREAMING_SNAKE_CASE : List[Any]="constant" , __SCREAMING_SNAKE_CASE : int=0.0 , **__SCREAMING_SNAKE_CASE : List[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = pruning_method
__a = mask_init
__a = mask_scale
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__snake_case :List[Any] = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _A ( __UpperCAmelCase ):
def __init__( self : Any , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)
    def _lowerCamelCase(self, top_k=None):
        '''simple docstring'''
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params
def __call__( self : str , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
    def _lowerCamelCase(self, image):
        '''simple docstring'''
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs
    def _lowerCamelCase(self, model_inputs):
        '''simple docstring'''
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def _lowerCamelCase(self, model_outputs, top_k=5):
        '''simple docstring'''
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
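# Minimal usage sketch (the checkpoint name is illustrative):
#   from transformers import pipeline
#   classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
#   classifier("cat.jpg", top_k=3)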
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _A :
UpperCamelCase__ : Optional[Union[str, Path]] = None
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = False
UpperCamelCase__ : Optional[Dict] = None
UpperCamelCase__ : Optional[str] = None
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = True
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : int = 1
UpperCamelCase__ : Optional[Union[str, bool]] = None
UpperCamelCase__ : bool = False
UpperCamelCase__ : Optional[Dict] = None
UpperCamelCase__ : Optional[str] = None
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _A :
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any]=13 , __SCREAMING_SNAKE_CASE : str=32 , __SCREAMING_SNAKE_CASE : Optional[Any]=2 , __SCREAMING_SNAKE_CASE : List[Any]=3 , __SCREAMING_SNAKE_CASE : str=16 , __SCREAMING_SNAKE_CASE : Tuple=[1, 2, 1] , __SCREAMING_SNAKE_CASE : Tuple=[2, 2, 4] , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : int=2.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Tuple=0.0 , __SCREAMING_SNAKE_CASE : List[Any]=0.0 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : str="gelu" , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : Optional[Any]=1E-5 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : List[str]=10 , __SCREAMING_SNAKE_CASE : Optional[int]=8 , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = embed_dim
__a = depths
__a = num_heads
__a = window_size
__a = mlp_ratio
__a = qkv_bias
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = drop_path_rate
__a = hidden_act
__a = use_absolute_embeddings
__a = patch_norm
__a = layer_norm_eps
__a = initializer_range
__a = is_training
__a = scope
__a = use_labels
__a = type_sequence_label_size
__a = encoder_stride
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__a = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self : Any):
'''simple docstring'''
return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
__a = SwinvaModel(config=__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = model(__SCREAMING_SNAKE_CASE)
__a = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
__a = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
__a = SwinvaForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
__a = 1
__a = SwinvaForMaskedImageModeling(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size))
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
__a = self.type_sequence_label_size
__a = SwinvaForImageClassification(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _A ( __UpperCAmelCase ,__UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : Tuple = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
UpperCamelCase__ : List[Any] = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ : List[Any] = False
UpperCamelCase__ : List[str] = False
UpperCamelCase__ : List[Any] = False
UpperCamelCase__ : Optional[Any] = False
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = SwinvaModelTester(self)
__a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , embed_dim=37)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE)
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''')
def _lowerCamelCase ( self : str):
'''simple docstring'''
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''')
def _lowerCamelCase ( self : int):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear))
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__SCREAMING_SNAKE_CASE)
__a = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = True
for model_class in self.all_model_classes:
__a = True
__a = False
__a = True
__a = model_class(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
__a = outputs.attentions
__a = len(self.model_tester.depths)
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__a = True
__a = config.window_size**2
__a = model_class(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
__a = outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
__a = len(__SCREAMING_SNAKE_CASE)
# Check attention is always last and order is fine
__a = True
__a = True
__a = model_class(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
if hasattr(self.model_tester , '''num_hidden_states_types'''):
__a = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
__a = 2
self.assertEqual(out_len + added_hidden_states , len(__SCREAMING_SNAKE_CASE))
__a = outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
__a = model_class(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
__a = outputs.hidden_states
__a = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths) + 1)
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
# Swinv2 has a different seq_length
__a = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
__a = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
__a = outputs.reshaped_hidden_states
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
__a , __a , __a , __a = reshaped_hidden_states[0].shape
__a = (
reshaped_hidden_states[0].view(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , height * width).permute(0 , 2 , 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__a = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
__a = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
__a = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__a = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__a = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width))
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE)
@slow
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = SwinvaModel.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = _config_zero_init(__SCREAMING_SNAKE_CASE)
for model_class in self.all_model_classes:
__a = model_class(config=__SCREAMING_SNAKE_CASE)
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class _A ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''')
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''').to(
__SCREAMING_SNAKE_CASE)
__a = self.default_image_processor
__a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
__a = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''').to(__SCREAMING_SNAKE_CASE)
# forward pass
with torch.no_grad():
__a = model(**__SCREAMING_SNAKE_CASE)
# verify the logits
__a = torch.Size((1, 1_000))
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE)
__a = torch.tensor([-0.39_47, -0.43_06, 0.00_26]).to(__SCREAMING_SNAKE_CASE)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4))
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :Union[str, Any] = logging.get_logger(__name__)
__snake_case :Any = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[int] = '''switch_transformers'''
UpperCamelCase__ : Optional[Any] = ['''past_key_values''']
UpperCamelCase__ : Optional[Any] = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str=32_128 , __SCREAMING_SNAKE_CASE : int=768 , __SCREAMING_SNAKE_CASE : Any=64 , __SCREAMING_SNAKE_CASE : Optional[int]=2_048 , __SCREAMING_SNAKE_CASE : List[str]=64 , __SCREAMING_SNAKE_CASE : int=12 , __SCREAMING_SNAKE_CASE : Any=3 , __SCREAMING_SNAKE_CASE : Optional[Any]=12 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : Tuple=8 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.01 , __SCREAMING_SNAKE_CASE : Dict="float32" , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : int=128 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : int=1E-6 , __SCREAMING_SNAKE_CASE : Dict=0.0_01 , __SCREAMING_SNAKE_CASE : List[str]=0.0_01 , __SCREAMING_SNAKE_CASE : List[Any]=1.0 , __SCREAMING_SNAKE_CASE : Optional[int]="relu" , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : int=0 , __SCREAMING_SNAKE_CASE : List[Any]=1 , **__SCREAMING_SNAKE_CASE : Dict , ):
'''simple docstring'''
__a = vocab_size
__a = d_model
__a = d_kv
__a = d_ff
__a = num_sparse_encoder_layers
__a = num_layers
__a = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__a = num_sparse_decoder_layers
        # Determines the stride between sparse (MoE) encoder layers.
if self.num_sparse_encoder_layers > 0:
__a = self.num_layers // self.num_sparse_encoder_layers
else:
__a = self.num_layers # HACK: this will create 0 sparse layers
        # Determines the stride between sparse (MoE) decoder layers.
if self.num_sparse_decoder_layers > 0:
__a = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__a = self.num_decoder_layers # HACK: this will create 0 sparse layers
__a = num_heads
__a = num_experts
__a = expert_capacity
__a = router_bias
__a = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}')
__a = router_dtype
__a = router_ignore_padding_tokens
__a = relative_attention_num_buckets
__a = relative_attention_max_distance
__a = dropout_rate
__a = layer_norm_epsilon
__a = initializer_factor
__a = feed_forward_proj
__a = use_cache
__a = add_router_probs
__a = router_z_loss_coef
__a = router_aux_loss_coef
__a = self.feed_forward_proj.split('''-''')
__a = act_info[-1]
__a = act_info[0] == '''gated'''
if len(__SCREAMING_SNAKE_CASE) > 1 and act_info[0] != "gated" or len(__SCREAMING_SNAKE_CASE) > 2:
raise ValueError(
F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''')
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__a = '''gelu_new'''
super().__init__(
pad_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
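# Quick check of the sparse-step arithmetic above (attribute name and values assumed
# from the public SwitchTransformersConfig API):
#   from transformers import SwitchTransformersConfig
#   cfg = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
#   assert cfg.encoder_sparse_step == 4  # a sparse MoE layer every 4th encoder layer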
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__snake_case :Union[str, Any] = trt.Logger(trt.Logger.WARNING)
__snake_case :int = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__snake_case :Optional[Any] = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle)
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names
question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])
        # Set to None the offset_mapping entries that are not part of the context, so it's easy to determine
        # whether a token position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]
    return tokenized_examples
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
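    # Per-batch async pattern: host->device input copies, execute_async, device->host
    # output copies, then stream.synchronize() before reading the logits.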
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(f' Num examples = {len(eval_dataset)}')
logger.info(f' Batch size = {args.per_device_eval_batch_size}')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
logger.info('''Total Number of Inference = %d''', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'Evaluation metrics: {eval_metric}')
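# Example invocation (script name is illustrative; the flags are defined above):
#   python evaluate_qa_trt.py --onnx_model_path model.onnx --output_dir ./out \
#       --tokenizer_name bert-base-uncased --dataset_name squad --fp16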
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
__snake_case :List[Any] = logging.getLogger(__name__)
class _A :
def __init__( self : List[str]):
'''simple docstring'''
        self.initialized = False
    def _lowerCamelCase(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        '''simple docstring'''
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
self.retriever.index.init_index()
    def _lowerCamelCase(self, question_hidden_states, n_docs):
        '''simple docstring'''
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
return doc_ids, retrieved_doc_embeds
class _A ( __UpperCAmelCase ):
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any]=None):
'''simple docstring'''
if index is not None and index.is_initialized() and len(__SCREAMING_SNAKE_CASE) > 0:
raise ValueError(
'''When using Ray for distributed fine-tuning, '''
'''you\'ll need to provide the paths instead, '''
'''as the dataset and the index are loaded '''
'''separately. More info in examples/rag/use_own_knowledge_dataset.py ''')
super().__init__(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=__SCREAMING_SNAKE_CASE , generator_tokenizer=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , init_retrieval=__SCREAMING_SNAKE_CASE , )
__a = retrieval_workers
if len(self.retrieval_workers) > 0:
ray.get(
[
worker.create_rag_retriever.remote(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
for worker in self.retrieval_workers
])
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
logger.info('''initializing retrieval''')
if len(self.retrieval_workers) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
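    # The retrieve method below dispatches each query to a randomly chosen Ray worker,
    # spreading load across the retrieval actors during distributed fine-tuning.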
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
if len(self.retrieval_workers) > 0:
# Select a random retrieval actor.
__a = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers) - 1)]
__a , __a = ray.get(random_worker.retrieve.remote(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
else:
__a , __a = self._main_retrieve(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__SCREAMING_SNAKE_CASE)
@classmethod
def _lowerCamelCase ( cls : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
return super(__SCREAMING_SNAKE_CASE , cls).get_tokenizers(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
@classmethod
def _lowerCamelCase ( cls : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None , **__SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a = kwargs.pop('''config''' , __SCREAMING_SNAKE_CASE) or RagConfig.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = RagTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE)
__a = rag_tokenizer.question_encoder
__a = rag_tokenizer.generator
if indexed_dataset is not None:
__a = '''custom'''
__a = CustomHFIndex(config.retrieval_vector_size , __SCREAMING_SNAKE_CASE)
else:
__a = cls._build_index(__SCREAMING_SNAKE_CASE)
return cls(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=__SCREAMING_SNAKE_CASE , generator_tokenizer=__SCREAMING_SNAKE_CASE , retrieval_workers=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , )
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class _A ( __UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : Dict = AlbertTokenizer
UpperCamelCase__ : List[str] = AlbertTokenizerFast
UpperCamelCase__ : int = True
UpperCamelCase__ : int = True
UpperCamelCase__ : Any = True
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname)
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
        input_text = "this is a test"
        output_text = "this is a test"
return input_text, output_text
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<pad>''')
self.assertEqual(vocab_keys[1] , '''<unk>''')
self.assertEqual(vocab_keys[-1] , '''▁eloquent''')
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 30_000)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 30_000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens , rust_tokens)
        ids = tokenizer.encode(sequence , add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False)
        self.assertListEqual(ids , rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids , rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB , keep_accents=True)
        tokens = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(tokens , ['''▁this''', '''▁is''', '''▁a''', '''▁test'''])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) , [48, 25, 21, 1_289])
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        self.assertListEqual(
            tokens , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.'''])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.'''] , )
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        text = tokenizer.encode('''sequence builders''')
        text_a = tokenizer.encode('''multi-sequence build''')
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
@slow
    def test_tokenizer_integration(self):
        # fmt: off
__a = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__a , model_name='''albert-base-v2''' , revision='''6b6560eaf5ff2e250b00c50f380c5389a9c2d82e''' , )
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f'Building PyTorch model from configuration: {config}' )
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model , tf_checkpoint_path , is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
__snake_case :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--big_bird_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
)
__snake_case :Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
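# A usage sketch (the script and file names here are assumptions, not taken from this
# repo): given a TF checkpoint, its JSON config, and an output directory, the converter
# above would be invoked roughly as
#
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path bigbird_model.ckpt \
#       --big_bird_config_file big_bird_config.json \
#       --pytorch_dump_path ./bigbird_pytorch \
#       --is_trivia_qa
#
# Dropping --is_trivia_qa converts the pre-training head instead of the QA head.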
import os
def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir , '''triangle.txt''')
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(''' '''):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    for i in range(1 , len(a)):
        for j in range(len(a[i])):
            number_a = a[i - 1][j] if j != len(a[i - 1]) else 0
            number_b = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_a , number_b)
    return max(a[-1])
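# Worked example (a made-up 4-row triangle, not the Project Euler input file): for
# [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]] the in-place DP rewrites the rows to
# [3], [10, 7], [12, 14, 13], [20, 19, 23, 16], so max(a[-1]) == 23,
# corresponding to the maximum path 3 -> 7 -> 4 -> 9.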
if __name__ == "__main__":
print(solution())
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_index, padding_side, sequence_length):
    # NOTE: the per-row assignments were lost in this copy; this reconstruction places
    # each (possibly truncated) sequence on the requested side of the padded row.
    if isinstance(padding_index , tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2) , padding_index)
    else:
        out_tensor = np.full((len(sequences), sequence_length) , padding_index)
    for i, tensor in enumerate(sequences):
        truncated = tensor[:sequence_length]
        if padding_side == "right":
            out_tensor[i, : len(truncated)] = truncated
        else:
            out_tensor[i, sequence_length - len(truncated) :] = truncated
    return out_tensor.tolist()
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith('''P'''):
        return True
    return False
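# For instance: is_punctuation('!') hits the ASCII range check, while
# is_punctuation('\u3002') (the ideographic full stop, Unicode category Po) is caught
# by the unicodedata branch; is_punctuation('a') and is_punctuation(' ') return False.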
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    # NOTE: the class name is reconstructed descriptively; the field names below are
    # exactly the attributes the method body reads (self.tokenizer, self.padding, ...).
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = '''label''' if '''label''' in features[0].keys() else '''labels'''
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch['''entity_ids''']).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature['''ner_tags'''] for feature in features]
        batch['''ner_tags'''] = padding_tensor(ner_tags , -1 , padding_side , sequence_length)
        original_entity_spans = [feature['''original_entity_spans'''] for feature in features]
        batch['''original_entity_spans'''] = padding_tensor(original_entity_spans , (-1, -1) , padding_side , sequence_length)
        batch = {k: torch.tensor(v , dtype=torch.int64) for k, v in batch.items()}
        return batch
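# A minimal usage sketch (the checkpoint name and feature dicts are illustrative
# assumptions): each feature must already carry `entity_ids`, `ner_tags`,
# `original_entity_spans` and `labels`, as produced by the LUKE preprocessing step.
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-base")
#   collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer, padding="longest")
#   batch = collator(list_of_features)  # dict of torch.int64 tensors, ready for the model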
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
__snake_case :Tuple = logging.getLogger()
__snake_case :Optional[int] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _A ( TestCasePlus ):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir , exist_ok=True)
        contents = {'''source''': '''What is love ?''', '''target''': '''life'''}
        n_lines = {'''train''': 12, '''val''': 2, '''test''': 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = '''\n'''.join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir , F'{split}.{field}') , '''w''') as f:
                    f.write(content)
    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir , '''output''')
        data_dir = os.path.join(tmp_dir , '''data''')
        self._create_dummy_data(data_dir=data_dir)
        testargs = F'''
            --data_dir {data_dir}
            --output_dir {output_dir}
            --model_name_or_path facebook/rag-sequence-base
            --model_type rag_sequence
            --do_train
            --do_predict
            --n_val -1
            --val_check_interval 1.0
            --train_batch_size 2
            --eval_batch_size 1
            --max_source_length 25
            --max_target_length 25
            --val_max_target_length 25
            --test_max_target_length 25
            --label_smoothing 0.1
            --dropout 0.1
            --attention_dropout 0.1
            --weight_decay 0.001
            --adam_epsilon 1e-08
            --max_grad_norm 0.1
            --lr_scheduler polynomial
            --learning_rate 3e-04
            --num_train_epochs 1
            --warmup_steps 4
            --gradient_accumulation_steps 1
            --distributed-port 8787
            --use_dummy_dataset 1
            --distributed_retriever {distributed_retriever}
        '''.split()
        if gpus > 0:
            testargs.append(F'--gpus={gpus}')
            if is_apex_available():
                testargs.append('''--fp16''')
        else:
            testargs.append('''--gpus=0''')
            testargs.append('''--distributed_backend=ddp_cpu''')
            testargs.append('''--num_processes=2''')
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd , env=self.get_env())
        metrics_save_path = os.path.join(output_dir , '''metrics.json''')
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
@require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2)
@require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2)
@require_torch_gpu
@require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1 , distributed_retriever='''ray''')
        self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2)
@require_torch_multi_gpu
@require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1 , distributed_retriever='''ray''')
        self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2)
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])
    result = mst(adjacency)
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :Any = logging.get_logger(__name__)
__snake_case :int = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _A ( PretrainedConfig ):
    model_type = '''sew-d'''

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-7,
        feature_layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. '''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '''
                F'but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)'
                F' = {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul , self.conv_stride , 1)
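# With the default conv_stride of (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the property
# above returns 5 * 2**6 = 320: the feature encoder emits one frame per 320 raw
# samples, i.e. every 20 ms of 16 kHz audio (the sampling rate SEW-D models assume).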
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class _A ( unittest.TestCase ):
    # test method names below are reconstructed descriptively from what each case checks
    def test_all_is_compatible(self):
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_diffusers_model_is_compatible(self):
        filenames = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_diffusers_model_is_not_compatible(self):
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
        self.assertFalse(is_safetensors_compatible(filenames))
    def test_transformer_model_is_compatible(self):
        filenames = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_transformer_model_is_not_compatible(self):
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertFalse(is_safetensors_compatible(filenames))
    def test_all_is_compatible_variant(self):
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = '''fp16'''
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant))
    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = '''fp16'''
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant))
    def test_diffusers_model_is_compatible_variant_partial(self):
        filenames = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        variant = '''fp16'''
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant))
    def test_all_is_not_compatible_variant(self):
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
        variant = '''fp16'''
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant))
    def test_transformer_model_is_compatible_variant(self):
        filenames = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
        variant = '''fp16'''
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant))
    def test_transformer_model_is_compatible_variant_partial(self):
        filenames = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
        variant = '''fp16'''
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant))
    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = '''fp16'''
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant))
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class _A ( TaskTemplate ):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True})
    input_schema: ClassVar[Features] = Features({'''text''': Value('''string''')})
    label_schema: ClassVar[Features] = Features({'''summary''': Value('''string''')})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
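# Illustrative use (the dataset column names below are hypothetical): instantiating the
# template with non-default columns yields the rename mapping that `datasets` applies
# when aligning a dataset to the canonical summarization schema.
#
#   template = _A(text_column='''article''' , summary_column='''highlights''')
#   template.column_mapping  # {'article': 'text', 'highlights': 'summary'}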
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = '''bart'''
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''')
        qar_model = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''').to('''cuda:0''')
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''')
        sas_model = AutoModelForSeq2SeqLM.from_pretrained('''yjernite/bart_eli5''').to('''cuda:0''')
        save_dict = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''')
        sas_model.load_state_dict(save_dict['''model'''])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''')
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''')['''train''']
        wiki40b_passage_reps = np.memmap(
            '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wiki40b_passages.num_rows, 128) , )
        wiki40b_index_flat = faiss.IndexFlatIP(128 )
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res , 1 , wiki40b_index_flat )
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps )  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''')
    eli5_train = eli5['''train_eli5''']
    eli5_train_q_reps = np.memmap(
        '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(eli5_train.num_rows, 128) )
    eli5_train_q_index = faiss.IndexFlatIP(128 )
    eli5_train_q_index.add(eli5_train_q_reps )
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
    D, I = eli5_train_q_index.search(q_rep , n_results )
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question , qar_model , qar_tokenizer , wiki40b_passages , wiki40b_gpu_index_flat , n_results )
        else:
            support_doc, hit_lst = query_es_index(
                question , es_client , index_name='''english_wiki40b_snippets_100w''' , n_results=n_results , )
    support_list = [
        (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
    ]
    question_doc = '''question: {} context: {}'''.format(question , support_doc )
    return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _UpperCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _UpperCAmelCase : None),
} )
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc , sas_model , sas_tokenizer , num_answers=1 , num_beams=n_beams , min_len=min_len , max_len=max_len , do_sample=sampling , temp=temp , top_p=top_p , top_k=None , max_input_length=1024 , device='''cuda:0''' , )[0]
    # `support_list` is resolved at module level by the caller, as in the original app
    return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
header_html = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
header_full = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
demo_options = st.sidebar.checkbox('''Demo options''')
if demo_options:
    action_st = st.sidebar.selectbox(
        '''''',
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        '''''',
        ['''Show full text of passages''', '''Show passage section titles'''],
        index=0,
    )
    show_passages = show_type == '''Show full text of passages'''
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
    retriever_info = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
    index_type = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
    wiki_source = '''wiki40b'''
    index_type = '''dense'''
sampled = '''beam'''
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox('''Generation options''')
if generate_options:
    generate_info = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
    min_len = st.sidebar.slider(
        '''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        '''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            '''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            '''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
question_s = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input('''Enter your question here:''', '''''')
else:
    question = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
        if index_type == "mixed":
            question_doc, support_list_dense = make_support(question, source=wiki_source, method='''dense''', n_results=10)
            question_doc, support_list_sparse = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
            wiki_url = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = '''[{}]({})'''.format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(''' & ''')
                sections = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
        answers_st = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
disclaimer = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( TokenizerTesterMixin ,unittest.TestCase ):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs)
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
@cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained('''allenai/led-base-16384''')
@cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''')
@require_torch
    def test_prepare_batch(self):
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , max_length=len(expected_src_tokens) , padding=True , return_tensors='''pt''')
            self.assertIsInstance(batch , BatchEncoding)
            self.assertEqual((2, 9) , batch.input_ids.shape)
            self.assertEqual((2, 9) , batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens , result)
@require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , padding=True , return_tensors='''pt''')
            self.assertIn('''input_ids''' , batch)
            self.assertIn('''attention_mask''' , batch)
            self.assertNotIn('''labels''' , batch)
            self.assertNotIn('''decoder_attention_mask''' , batch)
@require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text , max_length=32 , padding='''max_length''' , return_tensors='''pt''')
            self.assertEqual(32 , targets['''input_ids'''].shape[1])
@require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ['''I am a small frog''' * 1_024, '''I am a small frog'''] , padding=True , truncation=True , return_tensors='''pt''')
            self.assertIsInstance(batch , BatchEncoding)
            self.assertEqual(batch.input_ids.shape , (2, 5_122))
@require_torch
    def test_special_tokens(self):
        src_text = ['''A long paragraph for summarization.''']
        tgt_text = [
            '''Summary of the text.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text , return_tensors='''pt''')
            targets = tokenizer(text_target=tgt_text , return_tensors='''pt''')
            input_ids = inputs['''input_ids''']
            labels = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
@require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            text = ['''Summary of the text.''', '''Another summary.''']
            global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(text , padding=False)
            encoded_output['''global_attention_mask'''] = [[0] * len(x) for x in encoded_output['''input_ids''']]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs['''global_attention_mask'''] , global_attention_mask)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                sentence = '''A, <mask> AllenNLP sentence.'''
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True)
self.assertEqual(sum(tokens_r['''token_type_ids''']) , sum(tokens_p['''token_type_ids''']))
self.assertEqual(
sum(tokens_r['''attention_mask''']) / len(tokens_r['''attention_mask''']) , sum(tokens_p['''attention_mask''']) / len(tokens_p['''attention_mask''']) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''])
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
                self.assertSequenceEqual(
                    tokens_r_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _A ( Seq2SeqTrainer ):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args , **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset: Optional[Dataset] = None, eval_examples=None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval", **gen_kwargs):
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs['''max_length'''] = (
            gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''') is not None else self.args.generation_max_length
        )
        gen_kwargs['''num_beams'''] = (
            gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''') is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(F'{metric_key_prefix}_'):
                    metrics[F'{metric_key_prefix}_{key}'] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs):
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output , '''predict''')
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(F'{metric_key_prefix}_'):
                metrics[F'{metric_key_prefix}_{key}'] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics)
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
@slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''')
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]] , dtype=tf.int32 , )  # J'aime le camembert !"
        output = model(input_ids)['''last_hidden_state''']
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape , expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] , dtype=tf.float32 , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4))
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0
    operations = {'''+''', '''-''', '''*''', '''/'''}
    stack = []
for token in postfix_notation:
if token in operations:
            b, a = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
            stack.append(int(token))
return stack.pop()
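# Worked example: ["2", "1", "+", "3", "*"] encodes (2 + 1) * 3. The stack evolves as
# [2] -> [2, 1] -> [3] -> [3, 3] -> [9], so evaluate_postfix returns 9. The extra
# branch in the "/" case makes integer division truncate toward zero, e.g.
# evaluate_postfix(["-7", "2", "/"]) == -3 rather than Python's floored -4.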
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class _A ( TokenizerTesterMixin ,unittest.TestCase ):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = '''[PAD]'''
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , '''[PAD]''')
        self.assertEqual(vocab_keys[1] , '''[CLS]''')
        self.assertEqual(vocab_keys[-1] , '''j''')
        self.assertEqual(len(vocab_keys) , 1_012)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size , 1_012)
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True)
        tokens = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''')
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [35_389, 6_672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
__a = {'''input_ids''': [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__snake_case :Optional[int] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__snake_case :List[str] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__snake_case :List[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = len([g for position, g in enumerate(_UpperCAmelCase ) if g == main_target[position]] )
return (item, float(_UpperCAmelCase ))
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = random.randint(0 , len(_UpperCAmelCase ) - 1 )
__a = parent_a[:random_slice] + parent_a[random_slice:]
__a = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = list(_UpperCAmelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
__a = random.choice(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
__a = []
# Generate more children proportionally to the fitness score.
__a = int(parent_a[1] * 100 ) + 1
__a = 10 if child_n >= 10 else child_n
for _ in range(_UpperCAmelCase ):
__a = population_score[random.randint(0 , _UpperCAmelCase )][0]
__a , __a = crossover(parent_a[0] , _UpperCAmelCase )
# Append new string to the population list.
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
return pop
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True ):
    # Verify that N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
__a = f'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(_UpperCAmelCase )
# Verify that the target contains no genes besides the ones inside genes variable.
__a = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
__a = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(_UpperCAmelCase )
# Generate random starting population.
__a = []
for _ in range(_UpperCAmelCase ):
population.append(''''''.join([random.choice(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) )] ) )
    # Just some logs to know what the algorithm is doing.
__a , __a = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_UpperCAmelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
__a = [evaluate(_UpperCAmelCase , _UpperCAmelCase ) for item in population]
# Check if there is a matching evolution.
__a = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] , reverse=_UpperCAmelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'\nGeneration: {generation}'
f'\nTotal Population:{total_population}'
f'\nBest score: {population_score[0][1]}'
f'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
        # Keeping these avoids regression of the evolution.
__a = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_UpperCAmelCase )
# Normalize population score to be between 0 and 1.
__a = [
(item, score / len(_UpperCAmelCase )) for item, score in population_score
]
# This is selection
for i in range(_UpperCAmelCase ):
population.extend(select(population_score[int(_UpperCAmelCase )] , _UpperCAmelCase , _UpperCAmelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(_UpperCAmelCase ) > N_POPULATION:
break
if __name__ == "__main__":
__snake_case :Optional[int] = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__snake_case :List[Any] = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__snake_case ,__snake_case ,__snake_case :Dict = basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
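# A compact, readable restatement of the evaluate / crossover / mutate helpers
# above (names and signatures are my own sketch, not the module's; the file's
# `import random` is reused, and the mutation probability is taken as a
# parameter instead of the module constant, which defaults to 0.4 above):
def example_evaluate(item: str, main_target: str) -> tuple[str, float]:
    # Fitness = number of positions where the candidate matches the target.
    score = sum(1 for position, gene in enumerate(item) if gene == main_target[position])
    return (item, float(score))

def example_crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    # Single-point crossover: the two children swap tails at a random cut point.
    cut = random.randint(0, len(parent_1) - 1)
    return (parent_1[:cut] + parent_2[cut:], parent_2[:cut] + parent_1[cut:])

def example_mutate(child: str, genes: list[str], mutation_probability: float = 0.4) -> str:
    # With the given probability, replace one randomly chosen gene.
    child_genes = list(child)
    if random.uniform(0, 1) < mutation_probability:
        child_genes[random.randint(0, len(child_genes) - 1)] = random.choice(genes)
    return "".join(child_genes)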
import functools
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = len(_UpperCAmelCase )
__a = len(_UpperCAmelCase )
@functools.cache
def min_distance(_UpperCAmelCase , _UpperCAmelCase ) -> int:
        # if the first word's index overflows, delete the rest of the second word
if indexa >= len_worda:
return len_worda - indexa
        # if the second word's index overflows, delete the rest of the first word
if indexa >= len_worda:
return len_worda - indexa
__a = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , _UpperCAmelCase ) , 1 + min_distance(_UpperCAmelCase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
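# A readable sketch of the same top-down Levenshtein recursion (names are my
# own; the module's functools import above is reused):
def example_edit_distance(word_1: str, word_2: str) -> int:
    @functools.cache
    def go(i: int, j: int) -> int:
        if i >= len(word_1):  # word_1 exhausted: insert the rest of word_2
            return len(word_2) - j
        if j >= len(word_2):  # word_2 exhausted: delete the rest of word_1
            return len(word_1) - i
        substitution = int(word_1[i] != word_2[j])
        return min(
            1 + go(i + 1, j),  # delete from word_1
            1 + go(i, j + 1),  # insert into word_1
            substitution + go(i + 1, j + 1),  # keep or substitute
        )
    return go(0, 0)

assert example_edit_distance("kitten", "sitting") == 3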
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# Initialise PyTorch model
__a = LxmertConfig.from_json_file(_UpperCAmelCase )
print(f'Building PyTorch model from configuration: {config}' )
__a = LxmertForPreTraining(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , _UpperCAmelCase )
if __name__ == "__main__":
__snake_case :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__snake_case :Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
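# Example invocation (the script name and all three paths are illustrative
# placeholders, not real files):
#   python convert_lxmert_tf_checkpoint.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --config_file /path/to/lxmert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin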
from __future__ import annotations
__snake_case :Dict = list[tuple[int, int]]
__snake_case :Union[str, Any] = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0's are free paths whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__snake_case :str = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class _A :
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : Node | None , ):
'''simple docstring'''
__a = pos_x
__a = pos_y
__a = (pos_y, pos_x)
__a = goal_x
__a = goal_y
__a = g_cost
__a = parent
__a = self.calculate_heuristic()
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = abs(self.pos_x - self.goal_x)
__a = abs(self.pos_y - self.goal_y)
return dx + dy
def __lt__( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
return self.f_cost < other.f_cost
class _A :
def __init__( self : str , __SCREAMING_SNAKE_CASE : tuple[int, int] , __SCREAMING_SNAKE_CASE : tuple[int, int]):
'''simple docstring'''
__a = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __SCREAMING_SNAKE_CASE)
__a = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , __SCREAMING_SNAKE_CASE)
__a = [self.start]
__a = []
__a = False
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__a = self.open_nodes.pop(0)
if current_node.pos == self.target.pos:
__a = True
return self.retrace_path(__SCREAMING_SNAKE_CASE)
self.closed_nodes.append(__SCREAMING_SNAKE_CASE)
__a = self.get_successors(__SCREAMING_SNAKE_CASE)
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(__SCREAMING_SNAKE_CASE)
else:
# retrieve the best current path
__a = self.open_nodes.pop(self.open_nodes.index(__SCREAMING_SNAKE_CASE))
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(__SCREAMING_SNAKE_CASE)
else:
self.open_nodes.append(__SCREAMING_SNAKE_CASE)
if not self.reached:
return [self.start.pos]
return None
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Node):
'''simple docstring'''
__a = []
for action in delta:
__a = parent.pos_x + action[1]
__a = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(__SCREAMING_SNAKE_CASE) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __SCREAMING_SNAKE_CASE , ))
return successors
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Node | None):
'''simple docstring'''
__a = node
__a = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x))
__a = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
__snake_case :List[str] = (0, 0)
__snake_case :Any = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('''------''')
__snake_case :int = GreedyBestFirst(init, goal)
__snake_case :List[str] = greedy_bf.search()
if path:
for pos_x, pos_y in path:
__snake_case :Optional[Any] = 2
for elem in grid:
print(elem)
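# The heuristic in the Node class above is the Manhattan distance |dx| + |dy|.
# A standalone sketch of the same formula (my own helper, not part of the
# class). Design note: greedy best-first sorts the open list by the heuristic
# alone, so it finds a path quickly but, unlike A* (which also adds g_cost),
# does not guarantee the shortest one.
def example_manhattan(pos: tuple[int, int], goal: tuple[int, int]) -> int:
    return abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])

assert example_manhattan((0, 0), (6, 6)) == 12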
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def __snake_case ( _UpperCAmelCase = "isbn/0140328726" ):
__a = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('''/''' ) != 1:
__a = f'{olid} is not a valid Open Library olid'
raise ValueError(_UpperCAmelCase )
return requests.get(f'https://openlibrary.org/{new_olid}.json' ).json()
def __snake_case ( _UpperCAmelCase ):
__a = {
'''title''': '''Title''',
'''publish_date''': '''Publish date''',
'''authors''': '''Authors''',
'''number_of_pages''': '''Number of pages:''',
'''first_sentence''': '''First sentence''',
'''isbn_10''': '''ISBN (10)''',
'''isbn_13''': '''ISBN (13)''',
}
__a = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
__a = [
get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
]
__a = data['''First sentence''']['''value''']
for key, value in data.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a = ''', '''.join(_UpperCAmelCase )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
__snake_case :List[Any] = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(f'\nSearching Open Library for ISBN: {isbn}...\n')
try:
__snake_case :Optional[Any] = summarize_book(get_openlibrary_data(f'isbn/{isbn}'))
print('''\n'''.join(f'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'Sorry, there are no results for ISBN: {isbn}.')
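# Non-interactive usage sketch (requires network access; the olid below is the
# module's own default example):
#   book = summarize_book(get_openlibrary_data("isbn/0140328726"))
#   print(book["Title"])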
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__snake_case :Optional[int] = '''src/diffusers'''
__snake_case :Any = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
__snake_case :List[Any] = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
__snake_case :int = spec.loader.load_module()
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
return line.startswith(_UpperCAmelCase ) or len(_UpperCAmelCase ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , _UpperCAmelCase ) is not None
def __snake_case ( _UpperCAmelCase ):
__a = object_name.split('''.''' )
__a = 0
# First let's find the module where our object lives.
__a = parts[i]
while i < len(_UpperCAmelCase ) and not os.path.isfile(os.path.join(_UpperCAmelCase , f'{module}.py' ) ):
i += 1
if i < len(_UpperCAmelCase ):
__a = os.path.join(_UpperCAmelCase , parts[i] )
if i >= len(_UpperCAmelCase ):
raise ValueError(f'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
with open(os.path.join(_UpperCAmelCase , f'{module}.py' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__a = f.readlines()
# Now let's find the class / func in the code!
__a = ''''''
__a = 0
for name in parts[i + 1 :]:
while (
line_index < len(_UpperCAmelCase ) and re.search(Rf'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(_UpperCAmelCase ):
raise ValueError(f' {object_name} does not match any function or class in {module}.' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
__a = line_index
while line_index < len(_UpperCAmelCase ) and _should_continue(lines[line_index] , _UpperCAmelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
__a = lines[start_index:line_index]
return "".join(_UpperCAmelCase )
__snake_case :List[Any] = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
__snake_case :Tuple = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
__snake_case :Union[str, Any] = re.compile(r'''<FILL\s+[^>]*>''')
def __snake_case ( _UpperCAmelCase ):
__a = code.split('''\n''' )
__a = 0
while idx < len(_UpperCAmelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(_UpperCAmelCase ):
return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0]
return ""
def __snake_case ( _UpperCAmelCase ):
__a = len(get_indent(_UpperCAmelCase ) ) > 0
if has_indent:
__a = f'class Bla:\n{code}'
__a = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=_UpperCAmelCase )
__a = black.format_str(_UpperCAmelCase , mode=_UpperCAmelCase )
__a , __a = style_docstrings_in_code(_UpperCAmelCase )
return result[len('''class Bla:\n''' ) :] if has_indent else result
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=False ):
with open(_UpperCAmelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__a = f.readlines()
__a = []
__a = 0
    # Not a for loop because `lines` is going to change (if `overwrite=True`).
while line_index < len(_UpperCAmelCase ):
__a = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
__a , __a , __a = search.groups()
__a = find_code_in_diffusers(_UpperCAmelCase )
__a = get_indent(_UpperCAmelCase )
__a = line_index + 1 if indent == theoretical_indent else line_index + 2
__a = theoretical_indent
__a = start_index
    # Loop to check the observed code, stop when the indentation diminishes or if we see an End copy comment.
__a = True
while line_index < len(_UpperCAmelCase ) and should_continue:
line_index += 1
if line_index >= len(_UpperCAmelCase ):
break
__a = lines[line_index]
__a = _should_continue(_UpperCAmelCase , _UpperCAmelCase ) and re.search(f'^{indent}# End copy' , _UpperCAmelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
__a = lines[start_index:line_index]
__a = ''''''.join(_UpperCAmelCase )
# Remove any nested `Copied from` comments to avoid circular copies
__a = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(_UpperCAmelCase ) is None]
__a = '''\n'''.join(_UpperCAmelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(_UpperCAmelCase ) > 0:
__a = replace_pattern.replace('''with''' , '''''' ).split(''',''' )
__a = [_re_replace_pattern.search(_UpperCAmelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
__a , __a , __a = pattern.groups()
__a = re.sub(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if option.strip() == "all-casing":
__a = re.sub(obja.lower() , obja.lower() , _UpperCAmelCase )
__a = re.sub(obja.upper() , obja.upper() , _UpperCAmelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
__a = blackify(lines[start_index - 1] + theoretical_code )
__a = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
__a = lines[:start_index] + [theoretical_code] + lines[line_index:]
__a = start_index + 1
if overwrite and len(_UpperCAmelCase ) > 0:
# Warn the user a file has been modified.
print(f'Detected changes, rewriting {filename}.' )
with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(_UpperCAmelCase )
return diffs
def __snake_case ( _UpperCAmelCase = False ):
__a = glob.glob(os.path.join(_UpperCAmelCase , '''**/*.py''' ) , recursive=_UpperCAmelCase )
__a = []
for filename in all_files:
__a = is_copy_consistent(_UpperCAmelCase , _UpperCAmelCase )
diffs += [f'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
if not overwrite and len(_UpperCAmelCase ) > 0:
__a = '''\n'''.join(_UpperCAmelCase )
raise Exception(
'''Found the following copy inconsistencies:\n'''
+ diff
+ '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' )
if __name__ == "__main__":
__snake_case :List[str] = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__snake_case :int = parser.parse_args()
check_copies(args.fix_and_overwrite)
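# A minimal sketch of how the `Copied from` marker is parsed (the sample line
# below is made up for illustration, not taken from the diffusers codebase):
#   match = _re_copy_warning.search(
#       "    # Copied from diffusers.models.unet_2d.UNet2DModel with UNet->MyNet"
#   )
#   indent, object_name, replace_pattern = match.groups()
#   # indent == "    ", object_name == "models.unet_2d.UNet2DModel",
#   # replace_pattern == "with UNet->MyNet"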
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _A ( __UpperCAmelCase ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : NestedDataStructureLike[PathLike] , __SCREAMING_SNAKE_CASE : Optional[NamedSplit] = None , __SCREAMING_SNAKE_CASE : Optional[Features] = None , __SCREAMING_SNAKE_CASE : str = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[int] = None , **__SCREAMING_SNAKE_CASE : List[str] , ):
'''simple docstring'''
super().__init__(
__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , streaming=__SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__a = path_or_paths if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else {self.split: path_or_paths}
__a = Text(
cache_dir=__SCREAMING_SNAKE_CASE , data_files=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
if self.streaming:
__a = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
__a = None
__a = None
__a = None
__a = None
self.builder.download_and_prepare(
download_config=__SCREAMING_SNAKE_CASE , download_mode=__SCREAMING_SNAKE_CASE , verification_mode=__SCREAMING_SNAKE_CASE , base_path=__SCREAMING_SNAKE_CASE , num_proc=self.num_proc , )
__a = self.builder.as_dataset(
split=self.split , verification_mode=__SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory)
return dataset
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__snake_case :str = logging.get_logger(__name__)
__snake_case :int = {'''vocab_file''': '''vocab.txt'''}
__snake_case :List[Any] = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
__snake_case :List[str] = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
__snake_case :Optional[int] = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[int] = VOCAB_FILES_NAMES
UpperCamelCase__ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : int = ConvBertTokenizer
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Optional[int]="[UNK]" , __SCREAMING_SNAKE_CASE : int="[SEP]" , __SCREAMING_SNAKE_CASE : List[Any]="[PAD]" , __SCREAMING_SNAKE_CASE : int="[CLS]" , __SCREAMING_SNAKE_CASE : Optional[int]="[MASK]" , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , **__SCREAMING_SNAKE_CASE : Dict , ):
'''simple docstring'''
super().__init__(
__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenize_chinese_chars=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__a = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get('''lowercase''' , __SCREAMING_SNAKE_CASE) != do_lower_case
or normalizer_state.get('''strip_accents''' , __SCREAMING_SNAKE_CASE) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , __SCREAMING_SNAKE_CASE) != tokenize_chinese_chars
):
__a = getattr(__SCREAMING_SNAKE_CASE , normalizer_state.pop('''type'''))
__a = do_lower_case
__a = strip_accents
__a = tokenize_chinese_chars
__a = normalizer_class(**__SCREAMING_SNAKE_CASE)
__a = do_lower_case
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any=None):
'''simple docstring'''
__a = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None):
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None):
'''simple docstring'''
__a = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE)
return tuple(__SCREAMING_SNAKE_CASE)
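# For reference, the two methods above implement the BERT-style layout:
#   single sequence : [CLS] A [SEP]          -> token_type_ids all 0
#   sequence pair   : [CLS] A [SEP] B [SEP]  -> 0s over "[CLS] A [SEP]", 1s over "B [SEP]"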
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__snake_case :List[str] = '''\
Text data.
Second line of data.'''
__snake_case :Optional[Any] = '''file'''
@pytest.fixture(scope='''session''' )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
__a = bytes(_UpperCAmelCase , '''utf-8''' )
with zstd.open(_UpperCAmelCase , '''wb''' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture
def __snake_case ( _UpperCAmelCase ):
with open(os.path.join(tmpfs.local_root_dir , _UpperCAmelCase ) , '''w''' ) as f:
f.write(_UpperCAmelCase )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
__a = input_paths[compression_format]
__a = tmp_path / '''cache'''
__a = DownloadConfig(cache_dir=_UpperCAmelCase , extract_compressed_file=_UpperCAmelCase )
__a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase )
with open(_UpperCAmelCase ) as f:
__a = f.read()
with open(_UpperCAmelCase ) as f:
__a = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = '''custom_cache'''
__a = '''custom_extracted_dir'''
__a = tmp_path / '''custom_extracted_path'''
if default_extracted:
__a = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _UpperCAmelCase )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_UpperCAmelCase ) )
__a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
__a = xz_file
__a = (
DownloadConfig(extract_compressed_file=_UpperCAmelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_UpperCAmelCase )
)
__a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase )
assert Path(_UpperCAmelCase ).parent.parts[-2:] == expected
def __snake_case ( _UpperCAmelCase ):
# absolute path
__a = str(Path(_UpperCAmelCase ).resolve() )
assert cached_path(_UpperCAmelCase ) == text_file
# relative path
__a = str(Path(_UpperCAmelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_UpperCAmelCase ) == text_file
def __snake_case ( _UpperCAmelCase ):
# absolute path
__a = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(_UpperCAmelCase ):
cached_path(_UpperCAmelCase )
# relative path
__a = '''./__missing_file__.txt'''
with pytest.raises(_UpperCAmelCase ):
cached_path(_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = get_from_cache(f'tmp://{tmpfs_file}' )
with open(_UpperCAmelCase ) as f:
__a = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( ):
with pytest.raises(_UpperCAmelCase ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
http_get('''https://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
ftp_get('''ftp://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
fsspec_get('''s3://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
fsspec_head('''s3://huggingface.co''' )
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class _A ( __UpperCAmelCase ):
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = SMALL_MODEL_IDENTIFIER
__a = '''pt'''
__a = '''tf'''
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
__a = AutoModel.from_pretrained(self.test_model)
model_pt.save_pretrained(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
__a = TFAutoModel.from_pretrained(self.test_model , from_pt=__SCREAMING_SNAKE_CASE)
model_tf.save_pretrained(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = '''mock_framework'''
# Framework provided - return whatever the user provides
__a = FeaturesManager.determine_framework(self.test_model , __SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__SCREAMING_SNAKE_CASE)
__a = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__SCREAMING_SNAKE_CASE)
__a = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__SCREAMING_SNAKE_CASE)
__a = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_pt)
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__SCREAMING_SNAKE_CASE)
__a = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_tf)
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__SCREAMING_SNAKE_CASE):
__a = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = MagicMock(return_value=__SCREAMING_SNAKE_CASE)
with patch('''transformers.onnx.features.is_tf_available''' , __SCREAMING_SNAKE_CASE):
__a = FeaturesManager.determine_framework(self.test_model)
self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_pt)
# PyTorch not in environment -> use TensorFlow
__a = MagicMock(return_value=__SCREAMING_SNAKE_CASE)
with patch('''transformers.onnx.features.is_torch_available''' , __SCREAMING_SNAKE_CASE):
__a = FeaturesManager.determine_framework(self.test_model)
self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_tf)
# Both in environment -> use PyTorch
__a = MagicMock(return_value=__SCREAMING_SNAKE_CASE)
__a = MagicMock(return_value=__SCREAMING_SNAKE_CASE)
with patch('''transformers.onnx.features.is_tf_available''' , __SCREAMING_SNAKE_CASE), patch(
'''transformers.onnx.features.is_torch_available''' , __SCREAMING_SNAKE_CASE):
__a = FeaturesManager.determine_framework(self.test_model)
self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_pt)
# Both not in environment -> raise error
__a = MagicMock(return_value=__SCREAMING_SNAKE_CASE)
__a = MagicMock(return_value=__SCREAMING_SNAKE_CASE)
with patch('''transformers.onnx.features.is_tf_available''' , __SCREAMING_SNAKE_CASE), patch(
'''transformers.onnx.features.is_torch_available''' , __SCREAMING_SNAKE_CASE):
with self.assertRaises(__SCREAMING_SNAKE_CASE):
__a = FeaturesManager.determine_framework(self.test_model)
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Tuple = (DDPMParallelScheduler,)
def _lowerCamelCase ( self : int , **__SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**__SCREAMING_SNAKE_CASE)
return config
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str):
'''simple docstring'''
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_09_79)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = len(__SCREAMING_SNAKE_CASE)
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = self.dummy_sample_deter + 0.1
__a = self.dummy_sample_deter - 0.1
__a = samplea.shape[0]
__a = torch.stack([samplea, samplea, samplea] , dim=0)
__a = torch.arange(__SCREAMING_SNAKE_CASE)[0:3, None].repeat(1 , __SCREAMING_SNAKE_CASE)
__a = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
__a = scheduler.batch_step_no_noise(__SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1) , samples.flatten(0 , 1))
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 11_53.18_33) < 1E-2
assert abs(result_mean.item() - 0.50_05) < 1E-3
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = len(__SCREAMING_SNAKE_CASE)
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0)
for t in reversed(range(__SCREAMING_SNAKE_CASE)):
# 1. predict noise residual
__a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 2_58.96_06) < 1E-2
assert abs(result_mean.item() - 0.33_72) < 1E-3
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(prediction_type='''v_prediction''')
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = len(__SCREAMING_SNAKE_CASE)
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0)
for t in reversed(range(__SCREAMING_SNAKE_CASE)):
# 1. predict noise residual
__a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 2_02.02_96) < 1E-2
assert abs(result_mean.item() - 0.26_31) < 1E-3
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
__a = scheduler.timesteps
for i, timestep in enumerate(__SCREAMING_SNAKE_CASE):
if i == len(__SCREAMING_SNAKE_CASE) - 1:
__a = -1
else:
__a = timesteps[i + 1]
__a = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE)
__a = prev_t.item()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [100, 87, 50, 51, 0]
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''`custom_timesteps` must be in descending order.'''):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [100, 87, 50, 1, 0]
__a = len(__SCREAMING_SNAKE_CASE)
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''):
scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            __SCREAMING_SNAKE_CASE , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
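# For reference, the "fixed_small" variance asserted in the tests above is the
# standard DDPM posterior variance beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t).
# A standalone sketch under the linear-beta config these tests use (my own
# helper, not part of the scheduler API; the real scheduler also clamps tiny
# values, which this sketch omits). It reuses the file's `import torch`.
def example_ddpm_variance(t, num_steps=1_000, beta_start=0.0001, beta_end=0.02):
    betas = torch.linspace(beta_start, beta_end, num_steps)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    alpha_bar_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    return float(betas[t] * (1.0 - alpha_bar_prev) / (1.0 - alphas_cumprod[t]))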
import socket
def __snake_case ( ):
__a = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
__a = socket.gethostname()
__a = 12312
sock.connect((host, port) )
sock.send(b'''Hello server!''' )
with open('''Received_file''' , '''wb''' ) as out_file:
print('''File opened''' )
print('''Receiving data...''' )
while True:
__a = sock.recv(1024 )
if not data:
break
out_file.write(_UpperCAmelCase )
print('''Successfully received the file''' )
sock.close()
print('''Connection closed''' )
if __name__ == "__main__":
main()
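# A matching toy server sketch for the client above (my own illustration, not
# part of the original script): it must listen on the same host/port, read the
# greeting, stream a file back, then close so the client's recv loop ends.
# The filename is a placeholder; the file's `import socket` is reused.
def example_file_server(filename: str = "File_to_send", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _ = server.accept()
    conn.recv(1024)  # consume the "Hello server!" greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()
    server.close()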
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__snake_case :List[Any] = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _A ( __UpperCAmelCase ):
def __init__( self : Dict , **__SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE)
requires_backends(self , '''vision''')
requires_backends(self , '''torch''')
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.')
self.check_model_type(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any , **__SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
__a = {}
__a = {}
__a = {}
# preprocess args
if "points_per_batch" in kwargs:
__a = kwargs['''points_per_batch''']
if "points_per_crop" in kwargs:
__a = kwargs['''points_per_crop''']
if "crops_n_layers" in kwargs:
__a = kwargs['''crops_n_layers''']
if "crop_overlap_ratio" in kwargs:
__a = kwargs['''crop_overlap_ratio''']
if "crop_n_points_downscale_factor" in kwargs:
__a = kwargs['''crop_n_points_downscale_factor''']
# postprocess args
if "pred_iou_thresh" in kwargs:
__a = kwargs['''pred_iou_thresh''']
if "stability_score_offset" in kwargs:
__a = kwargs['''stability_score_offset''']
if "mask_threshold" in kwargs:
__a = kwargs['''mask_threshold''']
if "stability_score_thresh" in kwargs:
__a = kwargs['''stability_score_thresh''']
if "crops_nms_thresh" in kwargs:
__a = kwargs['''crops_nms_thresh''']
if "output_rle_mask" in kwargs:
__a = kwargs['''output_rle_mask''']
if "output_bboxes_mask" in kwargs:
__a = kwargs['''output_bboxes_mask''']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , *__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Any=None , **__SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
return super().__call__(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , num_workers=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any=64 , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : float = 512 / 1_500 , __SCREAMING_SNAKE_CASE : Optional[int] = 32 , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , ):
'''simple docstring'''
__a = load_image(__SCREAMING_SNAKE_CASE)
__a = self.image_processor.size['''longest_edge''']
__a , __a , __a , __a = self.image_processor.generate_crop_boxes(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''')
with self.device_placement():
if self.framework == "pt":
__a = self.get_inference_context()
with inference_context():
__a = self._ensure_tensor_on_device(__SCREAMING_SNAKE_CASE , device=self.device)
__a = self.model.get_image_embeddings(model_inputs.pop('''pixel_values'''))
__a = image_embeddings
__a = grid_points.shape[1]
__a = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
                '''Cannot have points_per_batch<=0. Must be >=1 to return batched outputs. '''
'''To return all points at once, set points_per_batch to None''')
for i in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = grid_points[:, i : i + points_per_batch, :, :]
__a = input_labels[:, i : i + points_per_batch]
__a = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int=0.88 , __SCREAMING_SNAKE_CASE : List[Any]=0.95 , __SCREAMING_SNAKE_CASE : List[Any]=0 , __SCREAMING_SNAKE_CASE : int=1 , ):
'''simple docstring'''
__a = model_inputs.pop('''input_boxes''')
__a = model_inputs.pop('''is_last''')
__a = model_inputs.pop('''original_sizes''').tolist()
__a = model_inputs.pop('''reshaped_input_sizes''').tolist()
__a = self.model(**__SCREAMING_SNAKE_CASE)
        # Post-processing happens here in order to avoid CPU-GPU copies of ALL the masks
__a = model_outputs['''pred_masks''']
__a = self.image_processor.post_process_masks(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , binarize=__SCREAMING_SNAKE_CASE)
__a = model_outputs['''iou_scores''']
__a , __a , __a = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : int=0.7 , ):
'''simple docstring'''
__a = []
__a = []
__a = []
for model_output in model_outputs:
all_scores.append(model_output.pop('''iou_scores'''))
all_masks.extend(model_output.pop('''masks'''))
all_boxes.append(model_output.pop('''boxes'''))
__a = torch.cat(__SCREAMING_SNAKE_CASE)
__a = torch.cat(__SCREAMING_SNAKE_CASE)
__a , __a , __a , __a = self.image_processor.post_process_for_mask_generation(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = defaultdict(__SCREAMING_SNAKE_CASE)
for output in model_outputs:
for k, v in output.items():
extra[k].append(__SCREAMING_SNAKE_CASE)
__a = {}
if output_rle_mask:
__a = rle_mask
if output_bboxes_mask:
__a = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__UpperCAmelCase )
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : str = field(default='''automatic-speech-recognition''' ,metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCamelCase__ : ClassVar[Features] = Features({'''audio''': Audio()} )
UpperCamelCase__ : ClassVar[Features] = Features({'''transcription''': Value('''string''' )} )
UpperCamelCase__ : str = "audio"
UpperCamelCase__ : str = "transcription"
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
if self.audio_column not in features:
raise ValueError(F'Column {self.audio_column} is not present in features.')
if not isinstance(features[self.audio_column] , __SCREAMING_SNAKE_CASE):
raise ValueError(F'Column {self.audio_column} is not an Audio type.')
__a = copy.deepcopy(self)
__a = self.input_schema.copy()
__a = features[self.audio_column]
__a = input_schema
return task_template
@property
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
return {self.audio_column: "audio", self.transcription_column: "transcription"}
from math import asin, atan, cos, radians, sin, sqrt, tan
__snake_case :List[Any] = 6378137.0
__snake_case :Optional[int] = 6356752.314245
__snake_case :Tuple = 6378137
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = (AXIS_A - AXIS_B) / AXIS_A
__a = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
__a = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
__a = radians(_UpperCAmelCase )
__a = radians(_UpperCAmelCase )
# Equation
__a = sin((phi_a - phi_a) / 2 )
__a = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
__a = sqrt(sin_sq_phi + (cos(_UpperCAmelCase ) * cos(_UpperCAmelCase ) * sin_sq_lambda) )
return 2 * RADIUS * asin(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
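# For comparison, a plain spherical haversine (without the flattening
# correction of the latitudes applied by the function above); my own helper,
# reusing the math imports at the top of this file:
def example_haversine(lat_1: float, lon_1: float, lat_2: float, lon_2: float) -> float:
    earth_radius = 6378137.0  # equatorial radius in metres, as above
    phi_1, phi_2 = radians(lat_1), radians(lat_2)
    half_d_phi = radians(lat_2 - lat_1) / 2
    half_d_lambda = radians(lon_2 - lon_1) / 2
    h = sin(half_d_phi) ** 2 + cos(phi_1) * cos(phi_2) * sin(half_d_lambda) ** 2
    return 2 * earth_radius * asin(sqrt(h))

# Approximate San Francisco -> New York (my own example coordinates):
#   example_haversine(37.774856, -122.424227, 40.713019, -74.012647)  # ~4.1e6 m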
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case :Any = logging.get_logger(__name__)
__snake_case :Optional[Any] = {
'''b0''': efficientnet.EfficientNetBa,
'''b1''': efficientnet.EfficientNetBa,
'''b2''': efficientnet.EfficientNetBa,
'''b3''': efficientnet.EfficientNetBa,
'''b4''': efficientnet.EfficientNetBa,
'''b5''': efficientnet.EfficientNetBa,
'''b6''': efficientnet.EfficientNetBa,
'''b7''': efficientnet.EfficientNetBa,
}
__snake_case :List[Any] = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
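# The entries above encode EfficientNet's compound scaling: each variant grows
# width (width_coef), depth (depth_coef), and input resolution together, e.g.
# b7 is ~2.0x wider and ~3.1x deeper than b0 and takes 600px inputs.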
def __snake_case ( _UpperCAmelCase ):
__a = EfficientNetConfig()
__a = CONFIG_MAP[model_name]['''hidden_dim''']
__a = CONFIG_MAP[model_name]['''width_coef''']
__a = CONFIG_MAP[model_name]['''depth_coef''']
__a = CONFIG_MAP[model_name]['''image_size''']
__a = CONFIG_MAP[model_name]['''dropout_rate''']
__a = CONFIG_MAP[model_name]['''dw_padding''']
__a = '''huggingface/label-files'''
__a = '''imagenet-1k-id2label.json'''
__a = 1000
__a = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
__a = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
return config
def __snake_case ( ):
__a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__a = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw )
return im
def __snake_case ( _UpperCAmelCase ):
__a = CONFIG_MAP[model_name]['''image_size''']
__a = EfficientNetImageProcessor(
size={'''height''': size, '''width''': size} , image_mean=[0.4_85, 0.4_56, 0.4_06] , image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] , do_center_crop=_UpperCAmelCase , )
return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split('''_''')[0].split('''block''')[1] for v in original_param_names if v.startswith('''block''')]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}
    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f'block{b}_expand_conv/kernel:0', f'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
rename_keys.append((f'block{b}_expand_bn/gamma:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
rename_keys.append((f'block{b}_expand_bn/beta:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
rename_keys.append(
(f'block{b}_expand_bn/moving_mean:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
rename_keys.append(
(f'block{b}_expand_bn/moving_variance:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
rename_keys.append(
(f'block{b}_dwconv/depthwise_kernel:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
rename_keys.append((f'block{b}_bn/gamma:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
rename_keys.append((f'block{b}_bn/beta:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
rename_keys.append(
(f'block{b}_bn/moving_mean:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
rename_keys.append(
(f'block{b}_bn/moving_variance:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
rename_keys.append((f'block{b}_se_reduce/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
rename_keys.append((f'block{b}_se_reduce/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
rename_keys.append((f'block{b}_se_expand/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
rename_keys.append((f'block{b}_se_expand/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
rename_keys.append(
(f'block{b}_project_conv/kernel:0', f'encoder.blocks.{hf_b}.projection.project_conv.weight') )
rename_keys.append((f'block{b}_project_bn/gamma:0', f'encoder.blocks.{hf_b}.projection.project_bn.weight') )
rename_keys.append((f'block{b}_project_bn/beta:0', f'encoder.blocks.{hf_b}.projection.project_bn.bias') )
rename_keys.append(
(f'block{b}_project_bn/moving_mean:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
rename_keys.append(
(f'block{b}_project_bn/moving_variance:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = '''efficientnet.''' + item[1]
    key_mapping['''predictions/kernel:0'''] = '''classifier.weight'''
    key_mapping['''predictions/bias:0'''] = '''classifier.bias'''
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
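# Note on the permutes above (standard TF/PyTorch weight layouts, stated here for clarity):
#   TF Conv2D kernels are (H, W, in_ch, out_ch)   -> PyTorch Conv2d expects (out_ch, in_ch, H, W): permute(3, 2, 0, 1)
#   TF depthwise kernels are (H, W, in_ch, mult)  -> PyTorch depthwise weights are (in_ch * mult, 1, H, W): permute(2, 3, 0, 1)
#   TF Dense kernels are (in, out)                -> PyTorch Linear weights are (out, in): np.transpose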
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Instantiate the original Keras model to read off its weights
    original_model = model_classes[model_name](
        include_top=True, weights='''imagenet''', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='''softmax''', )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())
    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print('''Converting parameters...''')
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors='''pt''')
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]['''image_size''']
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1E-3), "The predicted logits are not the same."
    print('''Model outputs match!''')
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model and image processor to hub
        print(f'Pushing converted {model_name} to the hub...')
        model_name = f'efficientnet-{model_name}'
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
__snake_case :int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
__snake_case :Optional[int] = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
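    # Example invocation (a sketch; the script filename is an assumption, flags are as defined above):
    #   python convert_efficientnet_to_pytorch.py --model_name b0 --pytorch_dump_folder_path hf_model --save_model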
| 49
| 1
|
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
__snake_case :List[str] = logging.get_logger(__name__)
DEFAULT_FONT_PATH = '''ybelkada/fonts'''
def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '
            '''Pix2StructImageProcessor. Please upgrade torch.''')
def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ['''torch'''])
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height, image_tensor.size(3) // patch_width, image_tensor.size(1) * patch_height * patch_width, )
    return patches.unsqueeze(0)
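# Shape sketch for torch_extract_patches above (assuming a 3x224x224 image and 16x16 patches):
#   unfold yields (1, 3*16*16, 196) columns; after the reshape/permute the result is
#   (1, 224 // 16, 224 // 16, 16 * 16 * 3) = (1, 14, 14, 768), i.e. one patch vector per grid cell.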
def render_text(text, text_size=36, text_color="black", background_color="white", left_padding=5, right_padding=5, top_padding=5, bottom_padding=5, font_bytes=None, font_path=None, ):
    requires_backends(render_text, '''vision''')
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = '''\n'''.join(lines)
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, '''Arial.TTF''')
    font = ImageFont.truetype(font, encoding='''UTF-8''', size=text_size)
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new('''RGB''', (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new('''RGB''', (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    requires_backends(render_header, '''vision''')
    # Convert to PIL image if necessary
    image = to_pil_image(image)
    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)
    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))
    new_image = Image.new('''RGB''', (new_width, new_height + new_header_height), '''white''')
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : List[str] = ['''flattened_patches''']
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : int = 2_048 , __SCREAMING_SNAKE_CASE : bool = False , **__SCREAMING_SNAKE_CASE : Dict , ):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE)
__a = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
__a = do_normalize
__a = do_convert_rgb
__a = max_patches
__a = is_vqa
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : dict , **__SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
requires_backends(self.extract_flattened_patches , '''torch''')
_check_torch_version()
# convert to torch
__a = to_channel_dimension_format(__SCREAMING_SNAKE_CASE , ChannelDimension.FIRST)
__a = torch.from_numpy(__SCREAMING_SNAKE_CASE)
__a , __a = patch_size['''height'''], patch_size['''width''']
__a , __a = get_image_size(__SCREAMING_SNAKE_CASE)
# maximize scale s.t.
__a = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
__a = max(min(math.floor(scale * image_height / patch_height) , __SCREAMING_SNAKE_CASE) , 1)
__a = max(min(math.floor(scale * image_width / patch_width) , __SCREAMING_SNAKE_CASE) , 1)
__a = max(num_feasible_rows * patch_height , 1)
__a = max(num_feasible_cols * patch_width , 1)
__a = torch.nn.functional.interpolate(
image.unsqueeze(0) , size=(resized_height, resized_width) , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE , antialias=__SCREAMING_SNAKE_CASE , ).squeeze(0)
# [1, rows, columns, patch_height * patch_width * image_channels]
__a = torch_extract_patches(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = patches.shape
__a = patches_shape[1]
__a = patches_shape[2]
__a = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
__a = patches.reshape([rows * columns, depth])
# [rows * columns, 1]
__a = torch.arange(__SCREAMING_SNAKE_CASE).reshape([rows, 1]).repeat(1 , __SCREAMING_SNAKE_CASE).reshape([rows * columns, 1])
__a = torch.arange(__SCREAMING_SNAKE_CASE).reshape([1, columns]).repeat(__SCREAMING_SNAKE_CASE , 1).reshape([rows * columns, 1])
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
        __a = row_ids.to(torch.float32)
        __a = col_ids.to(torch.float32)
# [rows * columns, 2 + patch_height * patch_width * image_channels]
__a = torch.cat([row_ids, col_ids, patches] , -1)
# [max_patches, 2 + patch_height * patch_width * image_channels]
__a = torch.nn.functional.pad(__SCREAMING_SNAKE_CASE , [0, 0, 0, max_patches - (rows * columns)]).float()
__a = to_numpy_array(__SCREAMING_SNAKE_CASE)
return result
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
        if image.dtype == np.uint8:
            __a = image.astype(np.float32)
# take mean across the whole `image`
__a = np.mean(__SCREAMING_SNAKE_CASE)
__a = np.std(__SCREAMING_SNAKE_CASE)
__a = max(__SCREAMING_SNAKE_CASE , 1.0 / math.sqrt(np.prod(image.shape)))
return normalize(__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
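    # The normalization above mirrors tf.image.per_image_standardization: each image is shifted by its
    # own mean and scaled by max(std, 1 / sqrt(num_pixels)) so near-constant images do not divide by ~0.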
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : ImageInput , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[Dict[str, int]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : ChannelDimension = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE : Optional[int] , ):
'''simple docstring'''
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__a = patch_size if patch_size is not None else self.patch_size
__a = max_patches if max_patches is not None else self.max_patches
__a = self.is_vqa
if kwargs.get('''data_format''' , __SCREAMING_SNAKE_CASE) is not None:
raise ValueError('''data_format is not an accepted input as the outputs are ''')
__a = make_list_of_images(__SCREAMING_SNAKE_CASE)
if not valid_images(__SCREAMING_SNAKE_CASE):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__a = [convert_to_rgb(__SCREAMING_SNAKE_CASE) for image in images]
# All transformations expect numpy arrays.
__a = [to_numpy_array(__SCREAMING_SNAKE_CASE) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('''A header text must be provided for VQA models.''')
__a = kwargs.pop('''font_bytes''' , __SCREAMING_SNAKE_CASE)
__a = kwargs.pop('''font_path''' , __SCREAMING_SNAKE_CASE)
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = [header_text] * len(__SCREAMING_SNAKE_CASE)
__a = [
render_header(__SCREAMING_SNAKE_CASE , header_text[i] , font_bytes=__SCREAMING_SNAKE_CASE , font_path=__SCREAMING_SNAKE_CASE)
for i, image in enumerate(__SCREAMING_SNAKE_CASE)
]
if do_normalize:
__a = [self.normalize(image=__SCREAMING_SNAKE_CASE) for image in images]
# convert to torch tensor and permute
__a = [
self.extract_flattened_patches(image=__SCREAMING_SNAKE_CASE , max_patches=__SCREAMING_SNAKE_CASE , patch_size=__SCREAMING_SNAKE_CASE)
for image in images
]
# create attention mask in numpy
        __a = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]
__a = BatchFeature(
data={'''flattened_patches''': images, '''attention_mask''': attention_masks} , tensor_type=__SCREAMING_SNAKE_CASE)
return encoded_outputs
| 49
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__snake_case :Optional[Any] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__snake_case :Any = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f'{len(upper_files)} files contain uppercase characters:')
print('''\n'''.join(upper_files) + '''\n''')
__snake_case :Tuple = [file for file in filepaths if ''' ''' in file]
if space_files:
print(f'{len(space_files)} files contain space characters:')
print('''\n'''.join(space_files) + '''\n''')
__snake_case :Optional[int] = [file for file in filepaths if '''-''' in file]
if hyphen_files:
print(f'{len(hyphen_files)} files contain hyphen characters:')
print('''\n'''.join(hyphen_files) + '''\n''')
__snake_case :Optional[int] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f'{len(nodir_files)} files are not in a directory:')
print('''\n'''.join(nodir_files) + '''\n''')
__snake_case :int = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 49
| 1
|
import numpy as np
import qiskit
def bb84(key_len=8, seed=None):
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)
    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name='''BB84''')
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    bb84_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend('''aer_simulator''')
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = ''''''.join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ])
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, '''0''')
    return key
if __name__ == "__main__":
    print(f'The generated key is : {bb84(8, seed=0)}')
from doctest import testmod
testmod()
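    # Illustrative usage (a minimal sketch; requires qiskit, exact bits depend on the seeded simulator):
    # print(bb84(key_len=16, seed=42))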
| 49
|
from collections import defaultdict
def check_anagrams(first_str, second_str):
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(''' ''', '''''')
    second_str = second_str.replace(''' ''', '''''')
    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False
    # Default values for count should be 0
    count = defaultdict(int)
    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input('''Enter the first string ''').strip()
    input_b = input('''Enter the second string ''').strip()
    status = check_anagrams(input_a, input_b)
print(f'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
| 49
| 1
|
def triangle_number_generator():
    for n in range(1, 1000000):
        yield n * (n + 1) // 2
def count_divisors(n):
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
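    # Worked example: 28 = 2**2 * 7 has (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28),
    # which is what count_divisors(28) returns; solution() scans triangle numbers for the first
    # one with more than 500 divisors.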
| 49
|
import logging
from transformers.configuration_utils import PretrainedConfig
__snake_case :Any = logging.getLogger(__name__)
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[Any] = '''masked_bert'''
def __init__( self : str , __SCREAMING_SNAKE_CASE : int=30_522 , __SCREAMING_SNAKE_CASE : str=768 , __SCREAMING_SNAKE_CASE : List[str]=12 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3_072 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=1E-12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0 , __SCREAMING_SNAKE_CASE : List[str]="topK" , __SCREAMING_SNAKE_CASE : List[Any]="constant" , __SCREAMING_SNAKE_CASE : int=0.0 , **__SCREAMING_SNAKE_CASE : List[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = pruning_method
__a = mask_init
__a = mask_scale
| 49
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case :Any = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Optional[Any] = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :List[str] = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Dict = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :List[str] = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
__snake_case :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 49
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _A :
UpperCamelCase__ : Optional[Union[str, Path]] = None
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = False
UpperCamelCase__ : Optional[Dict] = None
UpperCamelCase__ : Optional[str] = None
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = True
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : int = 1
UpperCamelCase__ : Optional[Union[str, bool]] = None
UpperCamelCase__ : bool = False
UpperCamelCase__ : Optional[Dict] = None
UpperCamelCase__ : Optional[str] = None
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(__SCREAMING_SNAKE_CASE) for k, v in self.__dict__.items()})
| 49
| 1
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(function, starting_point, variable="x", precision=10**-10, multiplicity=1):
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError('''Could not find root''') from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
    print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 - 5", 0.4 + 5j)}')
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
        f'{newton_raphson("exp(x) - 1", 10, precision=0.005)}',
)
# Find root of cos(x)
print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 49
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :Union[str, Any] = logging.get_logger(__name__)
__snake_case :Any = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[int] = '''switch_transformers'''
UpperCamelCase__ : Optional[Any] = ['''past_key_values''']
UpperCamelCase__ : Optional[Any] = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str=32_128 , __SCREAMING_SNAKE_CASE : int=768 , __SCREAMING_SNAKE_CASE : Any=64 , __SCREAMING_SNAKE_CASE : Optional[int]=2_048 , __SCREAMING_SNAKE_CASE : List[str]=64 , __SCREAMING_SNAKE_CASE : int=12 , __SCREAMING_SNAKE_CASE : Any=3 , __SCREAMING_SNAKE_CASE : Optional[Any]=12 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : Tuple=8 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.01 , __SCREAMING_SNAKE_CASE : Dict="float32" , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : int=128 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : int=1E-6 , __SCREAMING_SNAKE_CASE : Dict=0.0_01 , __SCREAMING_SNAKE_CASE : List[str]=0.0_01 , __SCREAMING_SNAKE_CASE : List[Any]=1.0 , __SCREAMING_SNAKE_CASE : Optional[int]="relu" , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : int=0 , __SCREAMING_SNAKE_CASE : List[Any]=1 , **__SCREAMING_SNAKE_CASE : Dict , ):
'''simple docstring'''
__a = vocab_size
__a = d_model
__a = d_kv
__a = d_ff
__a = num_sparse_encoder_layers
__a = num_layers
__a = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__a = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
__a = self.num_layers // self.num_sparse_encoder_layers
else:
__a = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
__a = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__a = self.num_decoder_layers # HACK: this will create 0 sparse layers
__a = num_heads
__a = num_experts
__a = expert_capacity
__a = router_bias
__a = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}')
__a = router_dtype
__a = router_ignore_padding_tokens
__a = relative_attention_num_buckets
__a = relative_attention_max_distance
__a = dropout_rate
__a = layer_norm_epsilon
__a = initializer_factor
__a = feed_forward_proj
__a = use_cache
__a = add_router_probs
__a = router_z_loss_coef
__a = router_aux_loss_coef
__a = self.feed_forward_proj.split('''-''')
__a = act_info[-1]
__a = act_info[0] == '''gated'''
if len(__SCREAMING_SNAKE_CASE) > 1 and act_info[0] != "gated" or len(__SCREAMING_SNAKE_CASE) > 2:
raise ValueError(
F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''')
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__a = '''gelu_new'''
super().__init__(
pad_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
| 49
| 1
|
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__snake_case :Any = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__snake_case :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 49
|
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
__snake_case :List[Any] = logging.getLogger(__name__)
class _A :
def __init__( self : List[str]):
'''simple docstring'''
__a = False
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
if not self.initialized:
__a = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=__SCREAMING_SNAKE_CASE , generator_tokenizer=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , init_retrieval=__SCREAMING_SNAKE_CASE , )
__a = True
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
self.retriever.index.init_index()
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
__a , __a = self.retriever._main_retrieve(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
return doc_ids, retrieved_doc_embeds
class _A ( __UpperCAmelCase ):
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any]=None):
'''simple docstring'''
if index is not None and index.is_initialized() and len(__SCREAMING_SNAKE_CASE) > 0:
raise ValueError(
'''When using Ray for distributed fine-tuning, '''
'''you\'ll need to provide the paths instead, '''
'''as the dataset and the index are loaded '''
'''separately. More info in examples/rag/use_own_knowledge_dataset.py ''')
super().__init__(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=__SCREAMING_SNAKE_CASE , generator_tokenizer=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , init_retrieval=__SCREAMING_SNAKE_CASE , )
__a = retrieval_workers
if len(self.retrieval_workers) > 0:
ray.get(
[
worker.create_rag_retriever.remote(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
for worker in self.retrieval_workers
])
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
logger.info('''initializing retrieval''')
if len(self.retrieval_workers) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
if len(self.retrieval_workers) > 0:
# Select a random retrieval actor.
__a = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers) - 1)]
__a , __a = ray.get(random_worker.retrieve.remote(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
else:
__a , __a = self._main_retrieve(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__SCREAMING_SNAKE_CASE)
@classmethod
def _lowerCamelCase ( cls : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
return super(__SCREAMING_SNAKE_CASE , cls).get_tokenizers(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
@classmethod
def _lowerCamelCase ( cls : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None , **__SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a = kwargs.pop('''config''' , __SCREAMING_SNAKE_CASE) or RagConfig.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = RagTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE)
__a = rag_tokenizer.question_encoder
__a = rag_tokenizer.generator
if indexed_dataset is not None:
__a = '''custom'''
__a = CustomHFIndex(config.retrieval_vector_size , __SCREAMING_SNAKE_CASE)
else:
__a = cls._build_index(__SCREAMING_SNAKE_CASE)
return cls(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=__SCREAMING_SNAKE_CASE , generator_tokenizer=__SCREAMING_SNAKE_CASE , retrieval_workers=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , )
| 49
| 1
|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
__a = flax_key_tuple[:-1] + ('''weight''',)
__a = torch.permute(_UpperCAmelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCAmelCase ):
# linear layer
__a = flax_key_tuple[:-1] + ('''weight''',)
__a = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__a = flax_key_tuple[:-1] + ('''weight''',)
return flax_key_tuple, flax_tensor
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if "metadata" in layer:
__a = layer.split('''metadata''' )
__a = ''''''.join(split_layer[0] )[:-1]
__a = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )]
elif "kvstore" in layer:
__a = layer.split('''kvstore''' )
__a = ''''''.join(split_layer[0] )[:-1]
__a = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )]
else:
__a = layer.split('''/''' )
__a = '''/'''.join(split_layer[:-1] )
__a = (split_layer[-1],)
if "kvstore/path" in layer:
__a = f'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
__a = '''file'''
else:
__a = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = rename_keys(_UpperCAmelCase )
__a = {}
for k, v in current_block.items():
__a = v
__a = new_current_block
torch.save(_UpperCAmelCase , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = WEIGHTS_NAME ):
__a = convert_file_size_to_int(_UpperCAmelCase )
__a = []
__a = {}
__a = 0
__a = 0
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp:
__a = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
__a = flatten_dict(_UpperCAmelCase , sep='''/''' )
__a = {}
for layer in checkpoint_info.keys():
__a , __a , __a = get_key_and_tensorstore_dict(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if curr_real_layer_name in all_layers:
__a = content
else:
__a = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
__a = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
__a = torch.tensor(_UpperCAmelCase )
__a = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
__a , __a = rename_base_flax_keys(tuple(key.split('''/''' ) ) , _UpperCAmelCase )
__a = '''/'''.join(_UpperCAmelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
__a = os.path.join(
_UpperCAmelCase , weights_name.replace('''.bin''' , f'-{len(_UpperCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_UpperCAmelCase , _UpperCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
__a = {}
__a = 0
__a = raw_weights.to(getattr(_UpperCAmelCase , _UpperCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
__a = os.path.join(_UpperCAmelCase , weights_name.replace('''.bin''' , f'-{len(_UpperCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_UpperCAmelCase , _UpperCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_UpperCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
__a = {}
__a = {}
for idx, shard in enumerate(_UpperCAmelCase ):
__a = weights_name.replace(
'''.bin''' , f'-{idx+1:05d}-of-{len(_UpperCAmelCase ):05d}.bin' ) # len(sharded_state_dicts):05d}
__a = os.path.join(_UpperCAmelCase , weights_name.replace('''.bin''' , f'-{idx+1:05d}-of-???.bin' ) )
os.rename(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )
__a = shard
for key in shard:
__a = shard_file
# Add the metadata
__a = {'''total_size''': total_size}
__a = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , '''w''' , encoding='''utf-8''' ) as f:
__a = json.dumps(_UpperCAmelCase , indent=2 , sort_keys=_UpperCAmelCase ) + '''\n'''
f.write(_UpperCAmelCase )
return metadata, index
if __name__ == "__main__":
__snake_case :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
__snake_case :int = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
    config = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''')
    config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''')
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        '''/home/arthur_huggingface_co/transformers/switch_converted''', device_map='''auto''')
    tokenizer = T5Tokenizer.from_pretrained('''t5-small''')
    text = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
    input_ids = tokenizer(text, return_tensors='''pt''').input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 49
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
__snake_case :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--big_bird_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
)
__snake_case :Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
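    # Example invocation (a sketch; the script filename is an assumption, paths are placeholders):
    #   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path bigbird/model.ckpt \
    #       --big_bird_config_file bigbird/config.json \
    #       --pytorch_dump_path bigbird_pytorch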
| 49
| 1
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _A :
UpperCamelCase__ : Optional[Union[str, Path]] = None
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = False
UpperCamelCase__ : Optional[Dict] = None
UpperCamelCase__ : Optional[str] = None
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = True
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : int = 1
UpperCamelCase__ : Optional[Union[str, bool]] = None
UpperCamelCase__ : bool = False
UpperCamelCase__ : Optional[Dict] = None
UpperCamelCase__ : Optional[str] = None
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(__SCREAMING_SNAKE_CASE) for k, v in self.__dict__.items()})
| 49
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) * -1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) * -1 :] = tensor[:sequence_length]
    return out_tensor.tolist()
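# Example (right padding to length 3 with -1; a sketch of the reconstruction above):
#   padding_tensor([[1, 2], [3, 4, 5]], -1, "right", 3) -> [[1, 2, -1], [3, 4, 5]]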
def __snake_case ( _UpperCAmelCase ):
__a = ord(_UpperCAmelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
__a = unicodedata.category(_UpperCAmelCase )
if cat.startswith('''P''' ):
return True
return False
@dataclass
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : PreTrainedTokenizerBase
UpperCamelCase__ : Union[bool, str, PaddingStrategy] = True
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : int = -100
UpperCamelCase__ : str = "pt"
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
import torch
__a = '''label''' if '''label''' in features[0].keys() else '''labels'''
__a = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
__a = self.tokenizer.pad(
__SCREAMING_SNAKE_CASE , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
if labels is None:
return batch
__a = torch.tensor(batch['''entity_ids''']).shape[1]
__a = self.tokenizer.padding_side
if padding_side == "right":
__a = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
]
else:
__a = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
]
__a = [feature['''ner_tags'''] for feature in features]
__a = padding_tensor(__SCREAMING_SNAKE_CASE , -1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = [feature['''original_entity_spans'''] for feature in features]
__a = padding_tensor(__SCREAMING_SNAKE_CASE , (-1, -1) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
        __a = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
return batch
| 49
| 1
|
def solution(power=1000):
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
if __name__ == "__main__":
    power = int(input('''Enter the power of 2: ''').strip())
print('''2 ^ ''', power, ''' = ''', 2**power)
    result = solution(power)
print('''Sum of the digits is: ''', result)
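    # Worked example: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, so solution(15) == 26.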
| 49
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])
    result = mst(adjacency)
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
assert edge in result or reverse in result
| 49
| 1
|
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
__snake_case :List[str] = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _A ( __UpperCAmelCase ):
def __init__( self : Any , **__SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
def __call__( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image", List["Image"]] , **__SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict , **__SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
__a = {}
if "candidate_labels" in kwargs:
__a = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__a = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Dict="This is a photo of {}."):
'''simple docstring'''
__a = load_image(__SCREAMING_SNAKE_CASE)
__a = self.image_processor(images=[image] , return_tensors=self.framework)
__a = candidate_labels
        __a = [hypothesis_template.format(x) for x in candidate_labels]
__a = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE)
__a = [text_inputs]
return inputs
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
__a = model_inputs.pop('''candidate_labels''')
__a = model_inputs.pop('''text_inputs''')
if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE):
__a = text_inputs[0]
else:
# Batching case.
__a = text_inputs[0][0]
__a = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Dict):
'''simple docstring'''
__a = model_outputs.pop('''candidate_labels''')
__a = model_outputs['''logits'''][0]
if self.framework == "pt":
__a = logits.softmax(dim=-1).squeeze(-1)
__a = probs.tolist()
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = [scores]
elif self.framework == "tf":
__a = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1)
__a = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}')
__a = [
{'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) , key=lambda x: -x[0])
]
return result
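# Illustrative usage of the pipeline above (a sketch; the model name is an example, any
# CLIP-style zero-shot checkpoint works):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"])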
| 49
|
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
__a = '''fp16'''
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
__a = '''fp16'''
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
__a = '''fp16'''
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
__a = '''fp16'''
self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
__a = '''fp16'''
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
__a = '''fp16'''
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
__a = '''fp16'''
self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
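# A hedged usage sketch outside the test class: ``is_safetensors_compatible`` can
# also be called directly on a list of repository filenames; the list below is
# hypothetical.
if __name__ == "__main__":
    demo_filenames = [
        '''unet/diffusion_pytorch_model.bin''',
        '''unet/diffusion_pytorch_model.safetensors''',
    ]
    print(is_safetensors_compatible(demo_filenames))  # expected: True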
| 49
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case :Tuple = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Any = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :List[str] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :List[Any] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__snake_case :List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
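# A minimal sketch of the lazy-import pattern configured above, using only the
# standard library; ``_DemoLazyModule`` is an illustrative stand-in for
# ``_LazyModule``, which likewise defers a submodule import until an attribute is
# first accessed.
if __name__ == "__main__":
    import importlib

    class _DemoLazyModule:
        def __init__(self, attr_to_module):
            self._attr_to_module = attr_to_module

        def __getattr__(self, name):
            # Import the owning module lazily, then fetch the attribute from it.
            module = importlib.import_module(self._attr_to_module[name])
            return getattr(module, name)

    lazy = _DemoLazyModule({'''sqrt''': '''math'''})
    print(lazy.sqrt(16.0))  # ``math`` is imported only on this first access -> 4.0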
| 49
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__snake_case :Dict = '''bart'''
__snake_case :Tuple = True
@st.cache(allow_output_mutation=_UpperCAmelCase )
def __snake_case ( ):
if LOAD_DENSE_INDEX:
__a = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
__a = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
__a = qar_model.eval()
else:
__a , __a = (None, None)
if MODEL_TYPE == "bart":
__a = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
__a = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
__a = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
__a = sas_model.eval()
else:
__a , __a = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_UpperCAmelCase )
def __snake_case ( ):
if LOAD_DENSE_INDEX:
__a = faiss.StandardGpuResources()
__a = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
__a = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
__a = faiss.IndexFlatIP(128 )
__a = faiss.index_cpu_to_gpu(_UpperCAmelCase , 1 , _UpperCAmelCase )
wikiaab_gpu_index_flat.add(_UpperCAmelCase ) # TODO fix for larger GPU
else:
__a , __a = (None, None)
__a = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_UpperCAmelCase )
def __snake_case ( ):
__a = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
__a = elia['''train_eli5''']
__a = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
__a = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_UpperCAmelCase )
return (elia_train, eli5_train_q_index)
__snake_case ,__snake_case ,__snake_case :List[str] = load_indexes()
__snake_case ,__snake_case ,__snake_case ,__snake_case :Dict = load_models()
__snake_case ,__snake_case :Tuple = load_train_data()
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=10 ):
__a = embed_questions_for_retrieval([question] , _UpperCAmelCase , _UpperCAmelCase )
__a , __a = eli5_train_q_index.search(_UpperCAmelCase , _UpperCAmelCase )
__a = [elia_train[int(_UpperCAmelCase )] for i in I[0]]
return nn_examples
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase="wiki40b" , _UpperCAmelCase="dense" , _UpperCAmelCase=10 ):
if source == "none":
__a , __a = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__a , __a = query_qa_dense_index(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
__a , __a = query_es_index(
_UpperCAmelCase , _UpperCAmelCase , index_name='''english_wiki40b_snippets_100w''' , n_results=_UpperCAmelCase , )
__a = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
__a = '''question: {} context: {}'''.format(_UpperCAmelCase , _UpperCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _UpperCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _UpperCAmelCase : None),
} )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=64 , _UpperCAmelCase=256 , _UpperCAmelCase=False , _UpperCAmelCase=2 , _UpperCAmelCase=0.95 , _UpperCAmelCase=0.8 ):
with torch.no_grad():
__a = qa_sas_generate(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , num_answers=1 , num_beams=_UpperCAmelCase , min_len=_UpperCAmelCase , max_len=_UpperCAmelCase , do_sample=_UpperCAmelCase , temp=_UpperCAmelCase , top_p=_UpperCAmelCase , top_k=_UpperCAmelCase , max_input_length=1024 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
__snake_case :Dict = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
__snake_case :int = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__snake_case :int = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
__snake_case :Union[str, Any] = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
__snake_case :int = st.sidebar.checkbox('''Demo options''')
if demo_options:
__snake_case :str = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
__snake_case :Tuple = action_list.index(action_st)
__snake_case :Optional[int] = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
__snake_case :Dict = show_type == '''Show full text of passages'''
else:
__snake_case :Dict = 3
__snake_case :str = True
__snake_case :Optional[Any] = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
__snake_case :List[str] = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
__snake_case :Dict = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
__snake_case :Optional[int] = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
__snake_case :Optional[int] = '''wiki40b'''
__snake_case :Dict = '''dense'''
__snake_case :Dict = '''beam'''
__snake_case :int = 2
__snake_case :str = 64
__snake_case :Tuple = 256
__snake_case :int = None
__snake_case :List[Any] = None
__snake_case :int = st.sidebar.checkbox('''Generation options''')
if generate_options:
__snake_case :Tuple = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
__snake_case :Tuple = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
__snake_case :Dict = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__snake_case :Dict = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__snake_case :List[str] = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__snake_case :Tuple = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
__snake_case :Any = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
__snake_case :Any = None
# start main text
__snake_case :Dict = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
__snake_case :int = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__snake_case :Optional[int] = st.text_input('''Enter your question here:''', '''''')
else:
__snake_case :Optional[int] = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
__snake_case ,__snake_case :int = make_support(question, source=wiki_source, method='''dense''', n_results=10)
__snake_case ,__snake_case :Optional[int] = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
__snake_case :Optional[Any] = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__snake_case :Union[str, Any] = support_list[:10]
__snake_case :Optional[int] = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
__snake_case ,__snake_case :Tuple = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__snake_case ,__snake_case :Optional[int] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
__snake_case :Dict = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
__snake_case :int = res[1].strip()
if sec_titles == "":
__snake_case :List[Any] = '''[{}]({})'''.format(res[0], wiki_url)
else:
__snake_case :Optional[int] = sec_titles.split(''' & ''')
__snake_case :str = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
__snake_case :str = find_nearest_training(question)
__snake_case :str = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
__snake_case :Optional[Any] = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
__snake_case :Tuple = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
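# A standalone sketch of the ''mixed'' retrieval merge used above: dense and
# sparse hits are interleaved, de-duplicated, and truncated. The tuples below are
# hypothetical passages, not real retrieval results.
if __name__ == "__main__":
    dense_hits = [('''A''', '''s1''', 0.9, '''text a'''), ('''B''', '''s2''', 0.8, '''text b''')]
    sparse_hits = [('''B''', '''s2''', 0.8, '''text b'''), ('''C''', '''s3''', 0.65, '''text c''')]
    merged = []
    for res_d, res_s in zip(dense_hits, sparse_hits):
        if tuple(res_d) not in merged:
            merged += [tuple(res_d)]
        if tuple(res_s) not in merged:
            merged += [tuple(res_s)]
    print([res[0] for res in merged[:10]])  # ['A', 'B', 'C']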
| 49
| 1
|
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class _A ( __UpperCAmelCase ):
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = tempfile.mkdtemp()
__a = 5
# Realm tok
__a = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__a = os.path.join(self.tmpdirname , '''realm_tokenizer''')
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE)
__a = os.path.join(__SCREAMING_SNAKE_CASE , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
__a = os.path.join(self.tmpdirname , '''realm_block_records''')
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer'''))
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = RealmConfig(num_block_records=self.num_block_records)
return config
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
})
return dataset
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = np.array(
[
B'''This is the first record''',
B'''This is the second record''',
B'''This is the third record''',
B'''This is the fourth record''',
B'''This is the fifth record''',
B'''This is a longer longer longer record''',
] , dtype=__SCREAMING_SNAKE_CASE , )
return block_records
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = self.get_config()
__a = self.get_dummy_retriever()
__a = retriever.tokenizer
__a = np.array([0, 3] , dtype='''long''')
__a = tokenizer(['''Test question''']).input_ids
__a = tokenizer(
['''the fourth'''] , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , ).input_ids
__a = config.reader_seq_len
__a , __a , __a , __a = retriever(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , answer_ids=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , return_tensors='''np''')
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 2)
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 2)
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 2)
self.assertEqual(concat_inputs.input_ids.shape , (2, 10))
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10))
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10))
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10))
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = self.get_config()
__a = self.get_dummy_retriever()
__a = retriever.tokenizer
__a = np.array([0, 3, 5] , dtype='''long''')
__a = tokenizer(['''Test question''']).input_ids
__a = tokenizer(
['''the fourth''', '''longer longer'''] , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , ).input_ids
__a = config.reader_seq_len
__a , __a , __a , __a = retriever(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , answer_ids=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , return_tensors='''np''')
self.assertEqual([False, True, True] , __SCREAMING_SNAKE_CASE)
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __SCREAMING_SNAKE_CASE)
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records'''))
# Test local path
__a = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records'''))
self.assertEqual(retriever.block_records[0] , B'''This is the first record''')
# Test mocked remote path
with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''') as mock_hf_hub_download:
__a = os.path.join(
os.path.join(self.tmpdirname , '''realm_block_records''') , _REALM_BLOCK_RECORDS_FILENAME)
__a = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''')
self.assertEqual(retriever.block_records[0] , B'''This is the first record''')
| 49
|
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _A ( __UpperCAmelCase ):
def __init__( self : List[Any] , *__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , **__SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = eval_examples
__a = post_process_function
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Dataset] = None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Optional[List[str]] = None , __SCREAMING_SNAKE_CASE : str = "eval" , **__SCREAMING_SNAKE_CASE : Any , ):
'''simple docstring'''
__a = gen_kwargs.copy()
__a = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''') is not None else self.args.generation_max_length
)
__a = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''') is not None else self.args.generation_num_beams
)
__a = gen_kwargs
__a = self.eval_dataset if eval_dataset is None else eval_dataset
__a = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE)
__a = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__a = self.compute_metrics
__a = None
__a = time.time()
__a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__a = eval_loop(
__SCREAMING_SNAKE_CASE , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
finally:
__a = compute_metrics
__a = self.args.eval_batch_size * self.args.world_size
if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__a = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = self.compute_metrics(__SCREAMING_SNAKE_CASE)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F'{metric_key_prefix}_'):
__a = metrics.pop(__SCREAMING_SNAKE_CASE)
metrics.update(output.metrics)
else:
__a = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(__SCREAMING_SNAKE_CASE)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
__a = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE)
return metrics
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : str = "test" , **__SCREAMING_SNAKE_CASE : Dict):
'''simple docstring'''
__a = gen_kwargs.copy()
__a = self.get_test_dataloader(__SCREAMING_SNAKE_CASE)
# Temporarily disable metric computation, we will do it in the loop here.
__a = self.compute_metrics
__a = None
__a = time.time()
__a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__a = eval_loop(
__SCREAMING_SNAKE_CASE , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
finally:
__a = compute_metrics
__a = self.args.eval_batch_size * self.args.world_size
if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is None or self.compute_metrics is None:
return output
__a = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''predict''')
__a = self.compute_metrics(__SCREAMING_SNAKE_CASE)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F'{metric_key_prefix}_'):
__a = metrics.pop(__SCREAMING_SNAKE_CASE)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE)
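# A small standalone illustration of the metric-prefixing loop above; the
# dictionary is a hypothetical stand-in for the output of ``compute_metrics``.
if __name__ == "__main__":
    demo_metrics = {'''bleu''': 31.5, '''eval_runtime''': 12.0}
    demo_prefix = '''eval'''
    for key in list(demo_metrics.keys()):
        if not key.startswith(f'{demo_prefix}_'):
            demo_metrics[f'{demo_prefix}_{key}'] = demo_metrics.pop(key)
    print(demo_metrics)  # {'eval_runtime': 12.0, 'eval_bleu': 31.5}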
| 49
| 1
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__snake_case :Optional[Any] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__snake_case :Any = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f'{len(upper_files)} files contain uppercase characters:')
print('''\n'''.join(upper_files) + '''\n''')
__snake_case :Tuple = [file for file in filepaths if ''' ''' in file]
if space_files:
print(f'{len(space_files)} files contain space characters:')
print('''\n'''.join(space_files) + '''\n''')
__snake_case :Optional[int] = [file for file in filepaths if '''-''' in file]
if hyphen_files:
print(f'{len(hyphen_files)} files contain hyphen characters:')
print('''\n'''.join(hyphen_files) + '''\n''')
__snake_case :Optional[int] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f'{len(nodir_files)} files are not in a directory:')
print('''\n'''.join(nodir_files) + '''\n''')
__snake_case :int = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
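# A quick illustration of the four checks above on a hypothetical file list
# (the last check is shown for POSIX, where ``os.sep`` is '/').
if __name__ == "__main__":
    demo_paths = ['''maths/GCD.py''', '''docs/my file.py''', '''data-utils/io.py''', '''loose.py''']
    print([f for f in demo_paths if f != f.lower()])  # ['maths/GCD.py']
    print([f for f in demo_paths if ''' ''' in f])  # ['docs/my file.py']
    print([f for f in demo_paths if '''-''' in f])  # ['data-utils/io.py']
    print([f for f in demo_paths if os.sep not in f])  # ['loose.py']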
| 49
|
from __future__ import annotations
from typing import Any
def __snake_case ( _UpperCAmelCase ):
if not postfix_notation:
return 0
__a = {'''+''', '''-''', '''*''', '''/'''}
__a = []
for token in postfix_notation:
if token in operations:
__a , __a = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(_UpperCAmelCase ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
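# A worked example that mirrors the evaluator above (the function name is
# obfuscated in this dump): the postfix expression ''2 3 + 4 *'' is (2 + 3) * 4.
if __name__ == "__main__":
    demo_stack = []
    for token in ['''2''', '''3''', '''+''', '''4''', '''*''']:
        if token == '''+''':
            b, a = demo_stack.pop(), demo_stack.pop()
            demo_stack.append(a + b)
        elif token == '''*''':
            b, a = demo_stack.pop(), demo_stack.pop()
            demo_stack.append(a * b)
        else:
            demo_stack.append(int(token))
    print(demo_stack.pop())  # 20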
| 49
| 1
|
| 49
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__snake_case :Optional[int] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__snake_case :List[str] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__snake_case :List[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = len([g for position, g in enumerate(_UpperCAmelCase ) if g == main_target[position]] )
return (item, float(_UpperCAmelCase ))
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = random.randint(0 , len(_UpperCAmelCase ) - 1 )
__a = parent_a[:random_slice] + parent_a[random_slice:]
__a = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = list(_UpperCAmelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
__a = random.choice(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
__a = []
# Generate more children proportionally to the fitness score.
__a = int(parent_a[1] * 100 ) + 1
__a = 10 if child_n >= 10 else child_n
for _ in range(_UpperCAmelCase ):
__a = population_score[random.randint(0 , _UpperCAmelCase )][0]
__a , __a = crossover(parent_a[0] , _UpperCAmelCase )
# Append new string to the population list.
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
return pop
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True ):
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
__a = f'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(_UpperCAmelCase )
# Verify that the target contains no genes besides the ones inside genes variable.
__a = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
__a = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(_UpperCAmelCase )
# Generate random starting population.
__a = []
for _ in range(_UpperCAmelCase ):
population.append(''''''.join([random.choice(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) )] ) )
# Just some logs to know what the algorithms is doing.
__a , __a = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_UpperCAmelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
__a = [evaluate(_UpperCAmelCase , _UpperCAmelCase ) for item in population]
# Check if there is a matching evolution.
__a = sorted(_UpperCAmelCase , key=lambda x : x[1] , reverse=_UpperCAmelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'\nGeneration: {generation}'
f'\nTotal Population:{total_population}'
f'\nBest score: {population_score[0][1]}'
f'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
__a = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_UpperCAmelCase )
# Normalize population score to be between 0 and 1.
__a = [
(item, score / len(_UpperCAmelCase )) for item, score in population_score
]
# This is selection
for i in range(_UpperCAmelCase ):
population.extend(select(population_score[int(_UpperCAmelCase )] , _UpperCAmelCase , _UpperCAmelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(_UpperCAmelCase ) > N_POPULATION:
break
if __name__ == "__main__":
__snake_case :Optional[int] = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__snake_case :List[Any] = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__snake_case ,__snake_case ,__snake_case :Dict = basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
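# A worked example of the fitness measure used above: the score is the number of
# positions where a candidate string matches the target.
if __name__ == "__main__":
    demo_target = '''hello'''
    demo_item = '''heLlo'''
    demo_score = len([g for position, g in enumerate(demo_item) if g == demo_target[position]])
    print(demo_score / len(demo_target))  # 0.8 -> four of the five characters match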
| 49
| 1
|
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=() , _UpperCAmelCase=None , _UpperCAmelCase="no" , _UpperCAmelCase="29500" ):
__a = False
__a = False
if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
__a = True
elif "IPython" in sys.modules:
__a = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
try:
__a = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f'Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.' )
if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , _UpperCAmelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if num_processes is None:
__a = 8
__a = PrepareForLaunch(_UpperCAmelCase , distributed_type='''TPU''' )
print(f'Launching a training on {num_processes} TPU cores.' )
xmp.spawn(_UpperCAmelCase , args=_UpperCAmelCase , nprocs=_UpperCAmelCase , start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
function(*_UpperCAmelCase )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=_UpperCAmelCase , master_addr='''127.0.0.1''' , master_port=_UpperCAmelCase , mixed_precision=_UpperCAmelCase ):
__a = PrepareForLaunch(_UpperCAmelCase , distributed_type='''MULTI_GPU''' )
print(f'Launching training on {num_processes} GPUs.' )
try:
start_processes(_UpperCAmelCase , args=_UpperCAmelCase , nprocs=_UpperCAmelCase , start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
__a = '''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
function(*_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=() , _UpperCAmelCase=2 ):
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=_UpperCAmelCase , master_addr='''127.0.0.1''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ):
__a = PrepareForLaunch(_UpperCAmelCase , debug=_UpperCAmelCase )
start_processes(_UpperCAmelCase , args=_UpperCAmelCase , nprocs=_UpperCAmelCase , start_method='''fork''' )
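# A hedged usage sketch, assuming the first function above corresponds to
# ``accelerate``'s ``notebook_launcher``; ``training_function`` is a hypothetical
# user-defined callable. Left commented out because it spawns worker processes.
#
#     def training_function():
#         ...  # build the Accelerator and run the training loop here
#
#     notebook_launcher(training_function, args=(), num_processes=2)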
| 49
|
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# Initialise PyTorch model
__a = LxmertConfig.from_json_file(_UpperCAmelCase )
print(f'Building PyTorch model from configuration: {config}' )
__a = LxmertForPreTraining(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , _UpperCAmelCase )
if __name__ == "__main__":
__snake_case :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__snake_case :Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
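# Example invocation, assuming this script is saved under a hypothetical name
# such as ``convert_lxmert_tf_checkpoint_to_pytorch.py``:
#
#     python convert_lxmert_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./lxmert_ckpt \
#         --config_file ./lxmert_config.json \
#         --pytorch_dump_path ./pytorch_model.bin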
| 49
| 1
|
from __future__ import annotations
from fractions import Fraction
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def __snake_case ( _UpperCAmelCase ):
__a = []
__a = 11
__a = int('''1''' + '''0''' * digit_len )
for num in range(_UpperCAmelCase , _UpperCAmelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(_UpperCAmelCase , _UpperCAmelCase ):
solutions.append(f'{num}/{den}' )
den += 1
num += 1
__a = 10
return solutions
def __snake_case ( _UpperCAmelCase = 2 ):
__a = 1.0
for fraction in fraction_list(_UpperCAmelCase ):
__a = Fraction(_UpperCAmelCase )
result *= frac.denominator / frac.numerator
return int(_UpperCAmelCase )
if __name__ == "__main__":
print(solution())
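# A worked example for the predicate above, using the name from its call site:
# 49/98 is the classic digit-cancelling fraction, since dropping the shared digit
# 9 leaves 4/8, which equals the original value. The published answer to this
# Project Euler problem (the reduced product of all such fractions) is 100.
if __name__ == "__main__":
    assert Fraction(49, 98) == Fraction(4, 8)
    assert is_digit_cancelling(49, 98)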
| 49
|
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def __snake_case ( _UpperCAmelCase = "isbn/0140328726" ):
__a = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('''/''' ) != 1:
__a = f'{olid} is not a valid Open Library olid'
raise ValueError(_UpperCAmelCase )
return requests.get(f'https://openlibrary.org/{new_olid}.json' ).json()
def __snake_case ( _UpperCAmelCase ):
__a = {
'''title''': '''Title''',
'''publish_date''': '''Publish date''',
'''authors''': '''Authors''',
'''number_of_pages''': '''Number of pages:''',
'''first_sentence''': '''First sentence''',
'''isbn_10''': '''ISBN (10)''',
'''isbn_13''': '''ISBN (13)''',
}
__a = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
__a = [
get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
]
__a = data['''First sentence''']['''value''']
for key, value in data.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a = ''', '''.join(_UpperCAmelCase )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
__snake_case :List[Any] = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(f'\nSearching Open Library for ISBN: {isbn}...\n')
try:
__snake_case :Optional[Any] = summarize_book(get_openlibrary_data(f'isbn/{isbn}'))
print('''\n'''.join(f'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'Sorry, there are no results for ISBN: {isbn}.')
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output['''input_ids'''] = tokenizer(example['''content'''], truncation=False)['''input_ids''']
    output['''ratio_char_token'''] = len(example['''content''']) / len(output['''input_ids'''])
    return output
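# Example of the mapping above (a sketch; `tokenizer` is the global loaded
# below from args.tokenizer_dir). For a 12-character snippet that encodes to
# 4 tokens, tokenize({'content': 'def f():pass'}) would return input_ids of
# length 4 and ratio_char_token == 12 / 4 == 3.0, a rough chars-per-token stat.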
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(f'Dataset loaded in {time.time()-t_start:.2f}s')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(f'Dataset tokenized in {time.time()-t_start:.2f}s')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'Data pushed to the hub in {time.time()-t_start:.2f}s')
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs, )
    def read(self):
        '''simple docstring'''
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
return dataset
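# Minimal usage sketch (hypothetical file name; upstream this class is the
# `datasets` text reader, normally reached via load_dataset('text', ...)):
# ds = TextDatasetReader('my_corpus.txt', streaming=False).read()
# Each line of the file becomes one example under the '''text''' column.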
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f'{torch_layer} layer.weight does not match'
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f'{torch_layer} layer.bias does not match'
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias), )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weights
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias), )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias), )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias), )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings), )
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f'{position_embeddings.weights[emb_idx]} emb does not match'
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias), )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias), )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, '''rb''') as f:
        model_weights = pickle.load(f)['''weights''']
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
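# Example invocation (a sketch; the script name and all paths are placeholders):
# python convert_reformer_trax_checkpoint_to_pytorch.py \
#     --trax_model_pkl_path ./model.pkl \
#     --config_file ./config.json \
#     --pytorch_dump_path ./pytorch_model.bin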
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the Trax model pickle file.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = '''\
Text data.
Second line of data.'''
FILE_PATH = '''file'''
@pytest.fixture(scope='''session''' )
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp('''data''') / (FILE_PATH + '''.zstd''')
    data = bytes(FILE_CONTENT, '''utf-8''')
    with zstd.open(path, '''wb''') as f:
        f.write(data)
return path
@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), '''w''') as f:
        f.write(FILE_CONTENT)
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / '''cache'''
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = '''custom_cache'''
    custom_extracted_dir = '''custom_extracted_dir'''
    custom_extracted_path = tmp_path / '''custom_extracted_path'''
    if default_extracted:
        expected = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
    else:
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''', custom_extracted_dir)
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''', str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file
def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / '''__missing_file__.txt''')
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = '''./__missing_file__.txt'''
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f'tmp://{tmpfs_file}')
    with open(output_file) as f:
        output_file_content = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''', True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path('''https://huggingface.co''')
@patch('''datasets.config.HF_DATASETS_OFFLINE''', True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp('''data''') / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled):
        http_get('''https://huggingface.co''', temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head('''https://huggingface.co''')
@patch('''datasets.config.HF_DATASETS_OFFLINE''', True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp('''data''') / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get('''ftp://huggingface.co''', temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head('''ftp://huggingface.co''')
@patch('''datasets.config.HF_DATASETS_OFFLINE''', True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp('''data''') / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get('''s3://huggingface.co''', temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head('''s3://huggingface.co''')
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError('''the value of input must not be negative''')
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
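# Worked example: 13 == 0b1101 has three set bits. Each `number &= number - 1`
# clears the lowest set bit, so the loop runs exactly once per set bit:
# 0b1101 & 0b1100 -> 0b1100; 0b1100 & 0b1011 -> 0b1000; 0b1000 & 0b0111 -> 0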
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError('''the value of input must not be negative''')
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = '''import __main__ as z'''
        print(f'Benchmark when {number = }:')
        print(f'{get_set_bits_count_using_modulo_operator(number) = }')
        # note: the timed statement uses a fixed input of 25 regardless of `number`
        timing = timeit('''z.get_set_bits_count_using_modulo_operator(25)''', setup=setup)
        print(f'timeit() runs in {timing} seconds')
        print(f'{get_set_bits_count_using_brian_kernighans_algorithm(number) = }')
        timing = timeit(
            '''z.get_set_bits_count_using_brian_kernighans_algorithm(25)''', setup=setup, )
        print(f'timeit() runs in {timing} seconds')
    for number in (25, 37, 58, 0):
        do_benchmark(number)
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)
    def get_scheduler_config(self, **kwargs):
        '''simple docstring'''
        config = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }
        config.update(**kwargs)
return config
    def test_timesteps(self):
        '''simple docstring'''
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self):
        '''simple docstring'''
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        '''simple docstring'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_variance_type(self):
        '''simple docstring'''
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)
    def test_clip_sample(self):
        '''simple docstring'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_thresholding(self):
        '''simple docstring'''
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )
    def test_prediction_type(self):
        '''simple docstring'''
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices(self):
        '''simple docstring'''
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5
    def test_batch_step_no_noise(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1153.1833) < 1E-2
        assert abs(result_mean.item() - 0.5005) < 1E-3
    def test_full_loop_no_noise(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1E-2
        assert abs(result_mean.item() - 0.3372) < 1E-3
    def test_full_loop_with_v_prediction(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''')
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1E-2
        assert abs(result_mean.item() - 0.2631) < 1E-3
    def test_custom_timesteps(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg='''`custom_timesteps` must be in descending order.'''):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}', ):
            scheduler.set_timesteps(timesteps=timesteps)
def solution(n: int = 600851475143) -> int:
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''')
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''')
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
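# Examples (follow directly from the trial division above):
# solution(13195) == 29, since 13195 == 5 * 7 * 13 * 29
# solution() == 6857 for the default n = 600851475143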
if __name__ == "__main__":
print(f'{solution() = }')
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)
        requires_backends(self, '''vision''')
        requires_backends(self, '''torch''')
        if self.framework != "pt":
            raise ValueError(F'The {self.__class__} is only available in PyTorch.')
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        '''simple docstring'''
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs['''points_per_batch'''] = kwargs['''points_per_batch''']
        if "points_per_crop" in kwargs:
            preprocess_kwargs['''points_per_crop'''] = kwargs['''points_per_crop''']
        if "crops_n_layers" in kwargs:
            preprocess_kwargs['''crops_n_layers'''] = kwargs['''crops_n_layers''']
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs['''crop_overlap_ratio'''] = kwargs['''crop_overlap_ratio''']
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs['''crop_n_points_downscale_factor'''] = kwargs['''crop_n_points_downscale_factor''']
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params['''pred_iou_thresh'''] = kwargs['''pred_iou_thresh''']
        if "stability_score_offset" in kwargs:
            forward_params['''stability_score_offset'''] = kwargs['''stability_score_offset''']
        if "mask_threshold" in kwargs:
            forward_params['''mask_threshold'''] = kwargs['''mask_threshold''']
        if "stability_score_thresh" in kwargs:
            forward_params['''stability_score_thresh'''] = kwargs['''stability_score_thresh''']
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs['''crops_nms_thresh'''] = kwargs['''crops_nms_thresh''']
        if "output_rle_mask" in kwargs:
            postprocess_kwargs['''output_rle_mask'''] = kwargs['''output_rle_mask''']
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs['''output_bboxes_mask'''] = kwargs['''output_bboxes_mask''']
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        '''simple docstring'''
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(self, image, points_per_batch=64, crops_n_layers: int = 0, crop_overlap_ratio: float = 512 / 1500, points_per_crop: Optional[int] = 32, crop_n_points_downscale_factor: Optional[int] = 1, ):
        '''simple docstring'''
        image = load_image(image)
        target_size = self.image_processor.size['''longest_edge''']
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor)
        model_inputs = self.image_processor(images=cropped_images, return_tensors='''pt''')
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop('''pixel_values'''))
                    model_inputs['''image_embeddings'''] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                '''Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '''
                '''To return all points at once, set points_per_batch to None''')
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
    def _forward(self, model_inputs, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1, ):
        '''simple docstring'''
        input_boxes = model_inputs.pop('''input_boxes''')
        is_last = model_inputs.pop('''is_last''')
        original_sizes = model_inputs.pop('''original_sizes''').tolist()
        reshaped_input_sizes = model_inputs.pop('''reshaped_input_sizes''').tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs['''pred_masks''']
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False)
        iou_scores = model_outputs['''iou_scores''']
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0], iou_scores[0], original_sizes[0], input_boxes[0], pred_iou_thresh, stability_score_thresh, mask_threshold, stability_score_offset, )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
    def postprocess(self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7, ):
        '''simple docstring'''
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop('''iou_scores'''))
            all_masks.extend(model_output.pop('''masks'''))
            all_boxes.append(model_output.pop('''boxes'''))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh)
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional['''rle_mask'''] = rle_mask
        if output_bboxes_mask:
            optional['''bounding_boxes'''] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        '''simple docstring'''
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        '''simple docstring'''
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader, description='''Evaluation''', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ))
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(F'{metric_key_prefix}_'):
                    metrics[F'{metric_key_prefix}_{key}'] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        '''simple docstring'''
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader, description='''Prediction''', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ))
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, '''predict''')
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(F'{metric_key_prefix}_'):
                metrics[F'{metric_key_prefix}_{key}'] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
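# Usage sketch (hypothetical hooks; mirrors the question-answering examples
# that ship with transformers):
# trainer = QuestionAnsweringTrainer(model=model, args=training_args,
#     eval_examples=raw_eval_set, post_process_function=post_processing_function)
# metrics = trainer.evaluate()  # runs the loop, then decodes spans into EM/F1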
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''', do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type'''))
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
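# Usage sketch (downloads the vocab from the hub on first use):
# tok = ConvBertTokenizerFast.from_pretrained('''YituTech/conv-bert-base''')
# tok('''hello world''')  # -> input_ids framed as [CLS] ... [SEP]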
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case :int = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__(self, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=125, additional_special_tokens=None, **kwargs, ):
        '''simple docstring'''
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F'<extra_id_{i}>' for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool('''extra_id''' in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    ''' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'''
                    ''' extra_ids tokens''')
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs, )
        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits
        # define special tokens dict
        self.special_tokens_encoder = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder = {v: k for k, v in self.special_tokens_encoder.items()}
@property
    def vocab_size(self):
        '''simple docstring'''
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def _add_eos_if_not_present(self, token_ids: List[int]):
        '''simple docstring'''
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
                ''' eos tokens being added.''')
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def _tokenize(self, text: str):
        '''simple docstring'''
        tokens = [chr(i) for i in text.encode('''utf-8''')]
        return tokens
    def _convert_token_to_id(self, token):
        '''simple docstring'''
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id
    def _convert_id_to_token(self, index):
        '''simple docstring'''
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token
    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        bstring = b''''''
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode('''utf-8''')
            elif token in self.added_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode('''utf-8''')
            elif token in self.special_tokens_encoder:
                tok_string = token.encode('''utf-8''')
            elif token in self.added_tokens_encoder:
                tok_string = token.encode('''utf-8''')
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode('''utf-8''', errors='''ignore''')
        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        '''simple docstring'''
        return ()
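# Byte-level example (follows directly from the encoder above: each byte maps
# to byte_value + 3, because ids 0-2 are reserved for pad/eos/unk):
# tok = ByT5Tokenizer()
# tok._tokenize('''hi''')       # ['h', 'i']
# tok('''hi''')['input_ids']    # [107, 108, 1], since ord('h') == 104 and eos == 1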
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]['''hidden_dim''']
    config.width_coefficient = CONFIG_MAP[model_name]['''width_coef''']
    config.depth_coefficient = CONFIG_MAP[model_name]['''depth_coef''']
    config.image_size = CONFIG_MAP[model_name]['''image_size''']
    config.dropout_rate = CONFIG_MAP[model_name]['''dropout_rate''']
    config.depthwise_padding = CONFIG_MAP[model_name]['''dw_padding''']
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
return config
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]['''image_size''']
    preprocessor = EfficientNetImageProcessor(
        size={'''height''': size, '''width''': size}, image_mean=[0.485, 0.456, 0.406], image_std=[0.47853944, 0.4732864, 0.47434163], do_center_crop=False, )
return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split('''_''')[0].split('''block''')[1] for v in original_param_names if v.startswith('''block''')]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}
    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
__a = block_name_mapping[b]
rename_keys.append((f'block{b}_expand_conv/kernel:0', f'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
rename_keys.append((f'block{b}_expand_bn/gamma:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
rename_keys.append((f'block{b}_expand_bn/beta:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
rename_keys.append(
(f'block{b}_expand_bn/moving_mean:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
rename_keys.append(
(f'block{b}_expand_bn/moving_variance:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
rename_keys.append(
(f'block{b}_dwconv/depthwise_kernel:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
rename_keys.append((f'block{b}_bn/gamma:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
rename_keys.append((f'block{b}_bn/beta:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
rename_keys.append(
(f'block{b}_bn/moving_mean:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
rename_keys.append(
(f'block{b}_bn/moving_variance:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
rename_keys.append((f'block{b}_se_reduce/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
rename_keys.append((f'block{b}_se_reduce/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
rename_keys.append((f'block{b}_se_expand/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
rename_keys.append((f'block{b}_se_expand/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
rename_keys.append(
(f'block{b}_project_conv/kernel:0', f'encoder.blocks.{hf_b}.projection.project_conv.weight') )
rename_keys.append((f'block{b}_project_bn/gamma:0', f'encoder.blocks.{hf_b}.projection.project_bn.weight') )
rename_keys.append((f'block{b}_project_bn/beta:0', f'encoder.blocks.{hf_b}.projection.project_bn.bias') )
rename_keys.append(
(f'block{b}_project_bn/moving_mean:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
rename_keys.append(
(f'block{b}_project_bn/moving_variance:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = '''efficientnet.''' + item[1]
    key_mapping['''predictions/kernel:0'''] = '''classifier.weight'''
    key_mapping['''predictions/bias:0'''] = '''classifier.bias'''
return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True, weights='''imagenet''', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='''softmax''', )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())
    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print('''Converting parameters...''')
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors='''pt''')
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]['''image_size''']
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1E-3), "The predicted logits are not the same."
    print('''Model outputs match!''')
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model and image processor to hub
        print(f'Pushing converted {model_name} to the hub...')
        model_name = f'efficientnet-{model_name}'
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
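# Example invocation (a sketch; the script name and paths are placeholders):
# python convert_efficientnet_to_pytorch.py --model_name b0 \
#     --pytorch_dump_folder_path ./hf_model --save_model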
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    def __init__(self, params, data):
        '''simple docstring'''
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
    def __getitem__(self, index):
        '''simple docstring'''
        return (self.token_ids[index], self.lengths[index])
def __len__( self : Union[str, Any]):
'''simple docstring'''
return len(self.lengths)
    def check(self):
'''simple docstring'''
assert len(self.token_ids) == len(self.lengths)
assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        '''simple docstring'''
        max_len = self.params.max_model_input_size
        idxs = self.lengths > max_len
        logger.info(F'Splitting {sum(idxs)} too long sequences.')
        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]
        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token''']
        else:
            cls_id, sep_id = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token''']
        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])
        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        '''simple docstring'''
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
logger.info(F'Remove {init_size - new_size} too short (<=11 tokens) sequences.')
    def remove_unknown_sequences(self):
        '''simple docstring'''
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids['''unk_token''']
            init_size = len(self)
            unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
            indices = (unk_occs / self.lengths) < 0.5
            self.token_ids = self.token_ids[indices]
            self.lengths = self.lengths[indices]
            new_size = len(self)
logger.info(F'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).')
    def print_statistics(self):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(F'{len(self)} sequences')
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a = [t[0] for t in batch]
__a = [t[1] for t in batch]
assert len(__SCREAMING_SNAKE_CASE) == len(__SCREAMING_SNAKE_CASE)
# Max for paddings
__a = max(__SCREAMING_SNAKE_CASE)
# Pad token ids
if self.params.mlm:
__a = self.params.special_tok_ids['''pad_token''']
else:
__a = self.params.special_tok_ids['''unk_token''']
__a = [list(t.astype(__SCREAMING_SNAKE_CASE)) + [pad_idx] * (max_seq_len_ - len(__SCREAMING_SNAKE_CASE)) for t in token_ids]
assert len(tk_) == len(__SCREAMING_SNAKE_CASE)
assert all(len(__SCREAMING_SNAKE_CASE) == max_seq_len_ for t in tk_)
__a = torch.tensor(tk_) # (bs, max_seq_len_)
__a = torch.tensor(__SCREAMING_SNAKE_CASE) # (bs)
return tk_t, lg_t
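# --- Hedged usage sketch (not from the original file) ---
# The class above mirrors `LmSeqsDataset` from the transformers distillation
# research project; `batch_sequences` is its DataLoader collate function.
# The `params` namespace and token-id values below are hypothetical
# stand-ins, and the sketch assumes the de-obfuscated original class:
# from types import SimpleNamespace
# from torch.utils.data import DataLoader
# params = SimpleNamespace(
#     max_model_input_size=512,
#     mlm=True,
#     is_master=True,
#     special_tok_ids={"cls_token": 101, "sep_token": 102, "pad_token": 0, "unk_token": 100},
# )
# data = [np.array([101] + [5] * 20 + [102]) for _ in range(32)]
# dataset = LmSeqsDataset(params, data)
# loader = DataLoader(dataset, batch_size=8, collate_fn=dataset.batch_sequences)
# for token_batch, length_batch in loader:  # (bs, max_seq_len), (bs,)
#     pass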
| 49
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__snake_case :Optional[Any] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__snake_case :Any = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f'{len(upper_files)} files contain uppercase characters:')
print('''\n'''.join(upper_files) + '''\n''')
__snake_case :Tuple = [file for file in filepaths if ''' ''' in file]
if space_files:
print(f'{len(space_files)} files contain space characters:')
print('''\n'''.join(space_files) + '''\n''')
__snake_case :Optional[int] = [file for file in filepaths if '''-''' in file]
if hyphen_files:
print(f'{len(hyphen_files)} files contain hyphen characters:')
print('''\n'''.join(hyphen_files) + '''\n''')
__snake_case :Optional[int] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f'{len(nodir_files)} files are not in a directory:')
print('''\n'''.join(nodir_files) + '''\n''')
__snake_case :int = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 49
| 1
|
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :Optional[int] = logging.get_logger(__name__)
__snake_case :Union[str, Any] = {
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[int] = '''autoformer'''
UpperCamelCase__ : List[str] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : str = "student_t" , __SCREAMING_SNAKE_CASE : str = "nll" , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : List[int] = [1, 2, 3, 4, 5, 6, 7] , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None , __SCREAMING_SNAKE_CASE : int = 64 , __SCREAMING_SNAKE_CASE : int = 2 , __SCREAMING_SNAKE_CASE : int = 2 , __SCREAMING_SNAKE_CASE : int = 2 , __SCREAMING_SNAKE_CASE : int = 2 , __SCREAMING_SNAKE_CASE : int = 32 , __SCREAMING_SNAKE_CASE : int = 32 , __SCREAMING_SNAKE_CASE : str = "gelu" , __SCREAMING_SNAKE_CASE : float = 0.1 , __SCREAMING_SNAKE_CASE : float = 0.1 , __SCREAMING_SNAKE_CASE : float = 0.1 , __SCREAMING_SNAKE_CASE : float = 0.1 , __SCREAMING_SNAKE_CASE : float = 0.1 , __SCREAMING_SNAKE_CASE : int = 100 , __SCREAMING_SNAKE_CASE : float = 0.02 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE : int = 25 , __SCREAMING_SNAKE_CASE : int = 3 , **__SCREAMING_SNAKE_CASE : Tuple , ):
'''simple docstring'''
__a = prediction_length
__a = context_length if context_length is not None else prediction_length
__a = distribution_output
__a = loss
__a = input_size
__a = num_time_features
__a = lags_sequence
__a = scaling
__a = num_dynamic_real_features
__a = num_static_real_features
__a = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(__SCREAMING_SNAKE_CASE) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''')
__a = cardinality
else:
__a = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(__SCREAMING_SNAKE_CASE) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''')
__a = embedding_dimension
else:
__a = [min(50 , (cat + 1) // 2) for cat in self.cardinality]
__a = num_parallel_samples
# Transformer architecture configuration
__a = input_size * len(self.lags_sequence) + self._number_of_features
__a = d_model
__a = encoder_attention_heads
__a = decoder_attention_heads
__a = encoder_ffn_dim
__a = decoder_ffn_dim
__a = encoder_layers
__a = decoder_layers
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = activation_function
__a = init_std
__a = use_cache
# Autoformer
__a = label_length
__a = moving_average
__a = autocorrelation_factor
super().__init__(is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
@property
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
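# --- Hedged sketch (not from the original file) ---
# The obfuscated class above corresponds to transformers' `AutoformerConfig`.
# With default arguments the derived `feature_size` fed into the model is
# input_size * len(lags_sequence) + _number_of_features, i.e. 1 * 7 + 2
# (the two extra features being log1p(abs(loc)) and log(scale)).
from transformers import AutoformerConfig
autoformer_config = AutoformerConfig(prediction_length=24)
assert autoformer_config.feature_size == 1 * len(autoformer_config.lags_sequence) + 2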
| 49
|
from collections import defaultdict
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = first_str.lower().strip()
__a = second_str.lower().strip()
# Remove whitespace
__a = first_str.replace(''' ''' , '''''' )
__a = second_str.replace(''' ''' , '''''' )
# Strings of different lengths are not anagrams
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
return False
    # Default values for count should be 0
    __a = defaultdict(int )
    # For each character in the input strings, increment the count for the
    # first string and decrement it for the second; anagrams net out to zero
for i in range(len(_UpperCAmelCase ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
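# --- Hedged quick checks (not from the original file) ---
# `check_anagrams` is the de-obfuscated name of the function above (it is
# what the __main__ block below calls): two strings are anagrams iff every
# character count nets out to zero after the +1/-1 bookkeeping.
assert check_anagrams("Silent", "Listen") is True
assert check_anagrams("This is a string", "Is this a string") is True
assert check_anagrams("There", "Their") is False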
if __name__ == "__main__":
from doctest import testmod
testmod()
__snake_case :Any = input('''Enter the first string ''').strip()
__snake_case :int = input('''Enter the second string ''').strip()
__snake_case :int = check_anagrams(input_a, input_b)
print(f'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
| 49
| 1
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__snake_case :List[str] = '''\
Text data.
Second line of data.'''
__snake_case :Optional[Any] = '''file'''
@pytest.fixture(scope='''session''' )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
__a = bytes(_UpperCAmelCase , '''utf-8''' )
with zstd.open(_UpperCAmelCase , '''wb''' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture
def __snake_case ( _UpperCAmelCase ):
with open(os.path.join(tmpfs.local_root_dir , _UpperCAmelCase ) , '''w''' ) as f:
f.write(_UpperCAmelCase )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
__a = input_paths[compression_format]
__a = tmp_path / '''cache'''
__a = DownloadConfig(cache_dir=_UpperCAmelCase , extract_compressed_file=_UpperCAmelCase )
__a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase )
with open(_UpperCAmelCase ) as f:
__a = f.read()
with open(_UpperCAmelCase ) as f:
__a = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = '''custom_cache'''
__a = '''custom_extracted_dir'''
__a = tmp_path / '''custom_extracted_path'''
if default_extracted:
__a = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _UpperCAmelCase )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_UpperCAmelCase ) )
__a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
__a = xz_file
__a = (
DownloadConfig(extract_compressed_file=_UpperCAmelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_UpperCAmelCase )
)
__a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase )
assert Path(_UpperCAmelCase ).parent.parts[-2:] == expected
def __snake_case ( _UpperCAmelCase ):
# absolute path
__a = str(Path(_UpperCAmelCase ).resolve() )
assert cached_path(_UpperCAmelCase ) == text_file
# relative path
__a = str(Path(_UpperCAmelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_UpperCAmelCase ) == text_file
def __snake_case ( _UpperCAmelCase ):
# absolute path
__a = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(_UpperCAmelCase ):
cached_path(_UpperCAmelCase )
# relative path
__a = '''./__missing_file__.txt'''
with pytest.raises(_UpperCAmelCase ):
cached_path(_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = get_from_cache(f'tmp://{tmpfs_file}' )
with open(_UpperCAmelCase ) as f:
__a = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( ):
with pytest.raises(_UpperCAmelCase ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
http_get('''https://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
ftp_get('''ftp://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
fsspec_get('''s3://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
fsspec_head('''s3://huggingface.co''' )
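# --- Hedged sketch (not from the original file) ---
# The `_UpperCAmelCase` in the @patch decorators above stands in for `True`
# in the original test file: with datasets.config.HF_DATASETS_OFFLINE
# patched to True, any remote fetch raises OfflineModeIsEnabled.
def test_cached_path_offline_inline():
    with patch('''datasets.config.HF_DATASETS_OFFLINE''' , True):
        with pytest.raises(OfflineModeIsEnabled):
            cached_path('''https://huggingface.co''' )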
| 49
|
import logging
from transformers.configuration_utils import PretrainedConfig
__snake_case :Any = logging.getLogger(__name__)
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[Any] = '''masked_bert'''
def __init__( self : str , __SCREAMING_SNAKE_CASE : int=30_522 , __SCREAMING_SNAKE_CASE : str=768 , __SCREAMING_SNAKE_CASE : List[str]=12 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3_072 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=1E-12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0 , __SCREAMING_SNAKE_CASE : List[str]="topK" , __SCREAMING_SNAKE_CASE : List[Any]="constant" , __SCREAMING_SNAKE_CASE : int=0.0 , **__SCREAMING_SNAKE_CASE : List[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = pruning_method
__a = mask_init
__a = mask_scale
| 49
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :Union[str, Any] = logging.get_logger(__name__)
__snake_case :int = {
'''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[Any] = '''glpn'''
def __init__( self : Any , __SCREAMING_SNAKE_CASE : List[Any]=3 , __SCREAMING_SNAKE_CASE : Optional[Any]=4 , __SCREAMING_SNAKE_CASE : Dict=[2, 2, 2, 2] , __SCREAMING_SNAKE_CASE : int=[8, 4, 2, 1] , __SCREAMING_SNAKE_CASE : Optional[int]=[32, 64, 160, 256] , __SCREAMING_SNAKE_CASE : Tuple=[7, 3, 3, 3] , __SCREAMING_SNAKE_CASE : List[Any]=[4, 2, 2, 2] , __SCREAMING_SNAKE_CASE : Optional[Any]=[1, 2, 5, 8] , __SCREAMING_SNAKE_CASE : Optional[int]=[4, 4, 4, 4] , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : int=1E-6 , __SCREAMING_SNAKE_CASE : Tuple=64 , __SCREAMING_SNAKE_CASE : Dict=10 , __SCREAMING_SNAKE_CASE : Optional[int]=-1 , **__SCREAMING_SNAKE_CASE : Dict , ):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE)
__a = num_channels
__a = num_encoder_blocks
__a = depths
__a = sr_ratios
__a = hidden_sizes
__a = patch_sizes
__a = strides
__a = mlp_ratios
__a = num_attention_heads
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = drop_path_rate
__a = layer_norm_eps
__a = decoder_hidden_size
__a = max_depth
__a = head_in_index
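# --- Hedged sketch (not from the original file) ---
# The class above corresponds to transformers' `GLPNConfig`: a SegFormer-style
# hierarchical encoder (4 stages by default) plus a monocular-depth decoder.
from transformers import GLPNConfig
glpn_config = GLPNConfig()
assert glpn_config.num_encoder_blocks == len(glpn_config.hidden_sizes) == 4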
| 49
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _A :
UpperCamelCase__ : Optional[Union[str, Path]] = None
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = False
UpperCamelCase__ : Optional[Dict] = None
UpperCamelCase__ : Optional[str] = None
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = True
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : int = 1
UpperCamelCase__ : Optional[Union[str, bool]] = None
UpperCamelCase__ : bool = False
UpperCamelCase__ : Optional[Dict] = None
UpperCamelCase__ : Optional[str] = None
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(__SCREAMING_SNAKE_CASE) for k, v in self.__dict__.items()})
| 49
| 1
|
from math import pi, sqrt
def __snake_case ( _UpperCAmelCase ):
if num <= 0:
raise ValueError('''math domain error''' )
if num > 1_71.5:
raise OverflowError('''math range error''' )
elif num - int(_UpperCAmelCase ) not in (0, 0.5):
raise NotImplementedError('''num must be an integer or a half-integer''' )
elif num == 0.5:
        return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def __snake_case ( ):
    assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
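# --- Hedged worked example (not from the original file) ---
# `gamma` is the de-obfuscated name used by the recursive call and the
# __main__ loop below. Unrolling gamma(n) = (n - 1) * gamma(n - 1) for a
# half-integer: gamma(3.5) = 2.5 * 1.5 * 0.5 * sqrt(pi) ≈ 3.323.
from math import isclose
assert isclose(gamma(3.5 ) , 2.5 * 1.5 * 0.5 * sqrt(pi ))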
if __name__ == "__main__":
from doctest import testmod
testmod()
__snake_case :Union[str, Any] = 1.0
while num:
__snake_case :Union[str, Any] = float(input('''Gamma of: '''))
print(f'gamma({num}) = {gamma(num)}')
print('''\nEnter 0 to exit...''')
| 49
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :Union[str, Any] = logging.get_logger(__name__)
__snake_case :Any = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[int] = '''switch_transformers'''
UpperCamelCase__ : Optional[Any] = ['''past_key_values''']
UpperCamelCase__ : Optional[Any] = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str=32_128 , __SCREAMING_SNAKE_CASE : int=768 , __SCREAMING_SNAKE_CASE : Any=64 , __SCREAMING_SNAKE_CASE : Optional[int]=2_048 , __SCREAMING_SNAKE_CASE : List[str]=64 , __SCREAMING_SNAKE_CASE : int=12 , __SCREAMING_SNAKE_CASE : Any=3 , __SCREAMING_SNAKE_CASE : Optional[Any]=12 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : Tuple=8 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.01 , __SCREAMING_SNAKE_CASE : Dict="float32" , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : int=128 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : int=1E-6 , __SCREAMING_SNAKE_CASE : Dict=0.0_01 , __SCREAMING_SNAKE_CASE : List[str]=0.0_01 , __SCREAMING_SNAKE_CASE : List[Any]=1.0 , __SCREAMING_SNAKE_CASE : Optional[int]="relu" , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : int=0 , __SCREAMING_SNAKE_CASE : List[Any]=1 , **__SCREAMING_SNAKE_CASE : Dict , ):
'''simple docstring'''
__a = vocab_size
__a = d_model
__a = d_kv
__a = d_ff
__a = num_sparse_encoder_layers
__a = num_layers
__a = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__a = num_sparse_decoder_layers
        # Every how many encoder layers a sparse (MoE) layer is inserted.
if self.num_sparse_encoder_layers > 0:
__a = self.num_layers // self.num_sparse_encoder_layers
else:
__a = self.num_layers # HACK: this will create 0 sparse layers
        # Every how many decoder layers a sparse (MoE) layer is inserted.
if self.num_sparse_decoder_layers > 0:
__a = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__a = self.num_decoder_layers # HACK: this will create 0 sparse layers
__a = num_heads
__a = num_experts
__a = expert_capacity
__a = router_bias
__a = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}')
__a = router_dtype
__a = router_ignore_padding_tokens
__a = relative_attention_num_buckets
__a = relative_attention_max_distance
__a = dropout_rate
__a = layer_norm_epsilon
__a = initializer_factor
__a = feed_forward_proj
__a = use_cache
__a = add_router_probs
__a = router_z_loss_coef
__a = router_aux_loss_coef
__a = self.feed_forward_proj.split('''-''')
__a = act_info[-1]
__a = act_info[0] == '''gated'''
if len(__SCREAMING_SNAKE_CASE) > 1 and act_info[0] != "gated" or len(__SCREAMING_SNAKE_CASE) > 2:
raise ValueError(
F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''')
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__a = '''gelu_new'''
super().__init__(
pad_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
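# --- Hedged sketch (not from the original file) ---
# The class above corresponds to transformers' `SwitchTransformersConfig`.
# The sparse-step bookkeeping in __init__ means "make every k-th layer a
# sparse mixture-of-experts layer":
from transformers import SwitchTransformersConfig
switch_config = SwitchTransformersConfig(num_layers=12 , num_sparse_encoder_layers=3)
assert switch_config.encoder_sparse_step == 4  # every 4th encoder layer is sparse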
| 49
| 1
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = '''van'''
def __init__( self : List[str] , __UpperCAmelCase : int=224 , __UpperCAmelCase : Tuple=3 , __UpperCAmelCase : Optional[Any]=[7, 3, 3, 3] , __UpperCAmelCase : Tuple=[4, 2, 2, 2] , __UpperCAmelCase : Optional[Any]=[64, 128, 320, 512] , __UpperCAmelCase : List[str]=[3, 3, 12, 3] , __UpperCAmelCase : Optional[int]=[8, 8, 4, 4] , __UpperCAmelCase : Optional[int]="gelu" , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : Optional[Any]=1e-6 , __UpperCAmelCase : str=1e-2 , __UpperCAmelCase : Dict=0.0 , __UpperCAmelCase : Optional[Any]=0.0 , **__UpperCAmelCase : List[Any] , ) ->Union[str, Any]:
"""simple docstring"""
super().__init__(**__UpperCAmelCase )
a = image_size
a = num_channels
a = patch_sizes
a = strides
a = hidden_sizes
a = depths
a = mlp_ratios
a = hidden_act
a = initializer_range
a = layer_norm_eps
a = layer_scale_init_value
a = drop_path_rate
a = dropout_rate
| 0
|
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
__snake_case :List[Any] = logging.getLogger(__name__)
class _A :
def __init__( self : List[str]):
'''simple docstring'''
__a = False
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
if not self.initialized:
__a = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=__SCREAMING_SNAKE_CASE , generator_tokenizer=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , init_retrieval=__SCREAMING_SNAKE_CASE , )
__a = True
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
self.retriever.index.init_index()
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
__a , __a = self.retriever._main_retrieve(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
return doc_ids, retrieved_doc_embeds
class _A ( __UpperCAmelCase ):
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any]=None):
'''simple docstring'''
if index is not None and index.is_initialized() and len(__SCREAMING_SNAKE_CASE) > 0:
raise ValueError(
'''When using Ray for distributed fine-tuning, '''
'''you\'ll need to provide the paths instead, '''
'''as the dataset and the index are loaded '''
'''separately. More info in examples/rag/use_own_knowledge_dataset.py ''')
super().__init__(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=__SCREAMING_SNAKE_CASE , generator_tokenizer=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , init_retrieval=__SCREAMING_SNAKE_CASE , )
__a = retrieval_workers
if len(self.retrieval_workers) > 0:
ray.get(
[
worker.create_rag_retriever.remote(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
for worker in self.retrieval_workers
])
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
logger.info('''initializing retrieval''')
if len(self.retrieval_workers) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
if len(self.retrieval_workers) > 0:
# Select a random retrieval actor.
__a = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers) - 1)]
__a , __a = ray.get(random_worker.retrieve.remote(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
else:
__a , __a = self._main_retrieve(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__SCREAMING_SNAKE_CASE)
@classmethod
def _lowerCamelCase ( cls : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
return super(__SCREAMING_SNAKE_CASE , cls).get_tokenizers(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
@classmethod
def _lowerCamelCase ( cls : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None , **__SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a = kwargs.pop('''config''' , __SCREAMING_SNAKE_CASE) or RagConfig.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = RagTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE)
__a = rag_tokenizer.question_encoder
__a = rag_tokenizer.generator
if indexed_dataset is not None:
__a = '''custom'''
__a = CustomHFIndex(config.retrieval_vector_size , __SCREAMING_SNAKE_CASE)
else:
__a = cls._build_index(__SCREAMING_SNAKE_CASE)
return cls(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=__SCREAMING_SNAKE_CASE , generator_tokenizer=__SCREAMING_SNAKE_CASE , retrieval_workers=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , )
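# --- Hedged wiring sketch (not from the original file) ---
# The two classes above mirror `RayRetriever` (the remote worker) and
# `RagRayDistributedRetriever` from the transformers RAG research project;
# both are obfuscated to `_A` here. A plausible setup, following that
# project's fine-tuning script:
# ray.init()
# workers = [ray.remote(RayRetriever).remote() for _ in range(num_retrieval_workers)]
# retriever = RagRayDistributedRetriever.from_pretrained("facebook/rag-token-nq", workers)
# retriever.init_retrieval()  # loads the index once, on the workers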
| 49
| 0
|
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
SCREAMING_SNAKE_CASE_: Tuple ={
'sample_size': 32,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': 10_00,
'block_out_channels': [32, 64],
'attention_head_dim': 8,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
SCREAMING_SNAKE_CASE_: Union[str, Any] ={
'sample_size': 64,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 3,
'num_class_embeds': 10_00,
'block_out_channels': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
SCREAMING_SNAKE_CASE_: Tuple ={
'sample_size': 2_56,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': None,
'block_out_channels': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'default',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
SCREAMING_SNAKE_CASE_: Any ={
'num_train_timesteps': 40,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
SCREAMING_SNAKE_CASE_: List[str] ={
'num_train_timesteps': 2_01,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
SCREAMING_SNAKE_CASE_: List[Any] ={
'num_train_timesteps': 1_51,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
def lowerCAmelCase_ ( snake_case_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
if isinstance(snake_case_ , snake_case_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected" )
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : Any , snake_case_ : Optional[int] , snake_case_ : Dict=False ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.in_layers.0.weight"""]
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.in_layers.0.bias"""]
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.in_layers.2.weight"""]
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.in_layers.2.bias"""]
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""]
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""]
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.out_layers.0.weight"""]
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.out_layers.0.bias"""]
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.out_layers.3.weight"""]
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.skip_connection.weight"""]
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : Any , snake_case_ : Any=None ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.norm.weight"""]
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.norm.bias"""]
UpperCAmelCase_ = weight_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = weight_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = weight_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = (
checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
UpperCAmelCase_ = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : int ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = torch.load(snake_case_ , map_location="cpu" )
UpperCAmelCase_ = {}
UpperCAmelCase_ = checkpoint["time_embed.0.weight"]
UpperCAmelCase_ = checkpoint["time_embed.0.bias"]
UpperCAmelCase_ = checkpoint["time_embed.2.weight"]
UpperCAmelCase_ = checkpoint["time_embed.2.bias"]
if unet_config["num_class_embeds"] is not None:
UpperCAmelCase_ = checkpoint["label_emb.weight"]
UpperCAmelCase_ = checkpoint["input_blocks.0.0.weight"]
UpperCAmelCase_ = checkpoint["input_blocks.0.0.bias"]
UpperCAmelCase_ = unet_config["down_block_types"]
UpperCAmelCase_ = unet_config["layers_per_block"]
UpperCAmelCase_ = unet_config["attention_head_dim"]
UpperCAmelCase_ = unet_config["block_out_channels"]
UpperCAmelCase_ = 1
UpperCAmelCase_ = channels_list[0]
for i, layer_type in enumerate(snake_case_ ):
UpperCAmelCase_ = channels_list[i]
UpperCAmelCase_ = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(snake_case_ ):
UpperCAmelCase_ = f"""down_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = f"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase_ = convert_resnet(snake_case_ , snake_case_ , snake_case_ , snake_case_ , has_skip=snake_case_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(snake_case_ ):
UpperCAmelCase_ = f"""down_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = f"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase_ = convert_resnet(snake_case_ , snake_case_ , snake_case_ , snake_case_ , has_skip=snake_case_ )
UpperCAmelCase_ = f"""down_blocks.{i}.attentions.{j}"""
UpperCAmelCase_ = f"""input_blocks.{current_layer}.1"""
UpperCAmelCase_ = convert_attention(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
current_layer += 1
if i != len(snake_case_ ) - 1:
UpperCAmelCase_ = f"""down_blocks.{i}.downsamplers.0"""
UpperCAmelCase_ = f"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
current_layer += 1
UpperCAmelCase_ = current_channels
# hardcoded the mid-block for now
UpperCAmelCase_ = "mid_block.resnets.0"
UpperCAmelCase_ = "middle_block.0"
UpperCAmelCase_ = convert_resnet(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase_ = "mid_block.attentions.0"
UpperCAmelCase_ = "middle_block.1"
UpperCAmelCase_ = convert_attention(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase_ = "mid_block.resnets.1"
UpperCAmelCase_ = "middle_block.2"
UpperCAmelCase_ = convert_resnet(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase_ = 0
UpperCAmelCase_ = unet_config["up_block_types"]
for i, layer_type in enumerate(snake_case_ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase_ = f"""up_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = f"""output_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(snake_case_ , snake_case_ , snake_case_ , snake_case_ , has_skip=snake_case_ )
current_layer += 1
if i != len(snake_case_ ) - 1:
UpperCAmelCase_ = f"""up_blocks.{i}.upsamplers.0"""
UpperCAmelCase_ = f"""output_blocks.{current_layer-1}.1"""
UpperCAmelCase_ = convert_resnet(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase_ = f"""up_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = f"""output_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(snake_case_ , snake_case_ , snake_case_ , snake_case_ , has_skip=snake_case_ )
UpperCAmelCase_ = f"""up_blocks.{i}.attentions.{j}"""
UpperCAmelCase_ = f"""output_blocks.{current_layer}.1"""
UpperCAmelCase_ = convert_attention(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
current_layer += 1
if i != len(snake_case_ ) - 1:
UpperCAmelCase_ = f"""up_blocks.{i}.upsamplers.0"""
UpperCAmelCase_ = f"""output_blocks.{current_layer-1}.2"""
UpperCAmelCase_ = convert_resnet(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase_ = checkpoint["out.0.weight"]
UpperCAmelCase_ = checkpoint["out.0.bias"]
UpperCAmelCase_ = checkpoint["out.2.weight"]
UpperCAmelCase_ = checkpoint["out.2.bias"]
return new_checkpoint
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Tuple =argparse.ArgumentParser()
parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
)
parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
SCREAMING_SNAKE_CASE_: Union[str, Any] =parser.parse_args()
SCREAMING_SNAKE_CASE_: Optional[Any] =strabool(args.class_cond)
SCREAMING_SNAKE_CASE_: List[Any] =os.path.basename(args.unet_path)
print(f"Checkpoint: {ckpt_name}")
# Get U-Net config
if "imagenet64" in ckpt_name:
SCREAMING_SNAKE_CASE_: int =IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
SCREAMING_SNAKE_CASE_: Tuple =LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
SCREAMING_SNAKE_CASE_: Any =TEST_UNET_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
if not args.class_cond:
SCREAMING_SNAKE_CASE_: Union[str, Any] =None
SCREAMING_SNAKE_CASE_: Any =con_pt_to_diffuser(args.unet_path, unet_config)
SCREAMING_SNAKE_CASE_: str =UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
SCREAMING_SNAKE_CASE_: str =CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
SCREAMING_SNAKE_CASE_: List[Any] =CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
SCREAMING_SNAKE_CASE_: Optional[Any] =CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
SCREAMING_SNAKE_CASE_: Union[str, Any] =CMStochasticIterativeScheduler(**scheduler_config)
SCREAMING_SNAKE_CASE_: Union[str, Any] =ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 1
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# Initialise PyTorch model
__a = BigBirdConfig.from_json_file(_UpperCAmelCase )
print(f'Building PyTorch model from configuration: {config}' )
if is_trivia_qa:
__a = BigBirdForQuestionAnswering(_UpperCAmelCase )
else:
__a = BigBirdForPreTraining(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(_UpperCAmelCase , _UpperCAmelCase , is_trivia_qa=_UpperCAmelCase )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
__snake_case :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--big_bird_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
)
__snake_case :Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 49
| 0
|
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : int = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = """mvp"""
lowerCAmelCase__ : Optional[Any] = ["""past_key_values"""]
lowerCAmelCase__ : List[str] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__(self : Any , UpperCamelCase : Optional[int]=50267 , UpperCamelCase : Tuple=1024 , UpperCamelCase : int=12 , UpperCamelCase : Tuple=4096 , UpperCamelCase : Dict=16 , UpperCamelCase : int=12 , UpperCamelCase : Optional[int]=4096 , UpperCamelCase : Optional[int]=16 , UpperCamelCase : Tuple=0.0 , UpperCamelCase : Tuple=0.0 , UpperCamelCase : List[Any]="gelu" , UpperCamelCase : Union[str, Any]=1024 , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : str=0.0 , UpperCamelCase : str=0.0 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : List[str]=0.0 , UpperCamelCase : List[str]=False , UpperCamelCase : Optional[int]=True , UpperCamelCase : Any=1 , UpperCamelCase : int=0 , UpperCamelCase : int=2 , UpperCamelCase : Any=True , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : Tuple=False , UpperCamelCase : int=100 , UpperCamelCase : Optional[Any]=800 , **UpperCamelCase : str , ):
'''simple docstring'''
lowercase__ = vocab_size
lowercase__ = max_position_embeddings
lowercase__ = d_model
lowercase__ = encoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = encoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_layers
lowercase__ = decoder_attention_heads
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = classifier_dropout
lowercase__ = use_cache
lowercase__ = encoder_layers
lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase__ = use_prompt
lowercase__ = prompt_length
lowercase__ = prompt_mid_dim
super().__init__(
pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , is_encoder_decoder=UpperCamelCase , decoder_start_token_id=UpperCamelCase , forced_eos_token_id=UpperCamelCase , **UpperCamelCase , )
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , UpperCamelCase ):
lowercase__ = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
'''The config can simply be saved and uploaded again to be fixed.''' )
| 2
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a = np.full((len(_UpperCAmelCase ), sequence_length, 2) , _UpperCAmelCase )
else:
__a = np.full((len(_UpperCAmelCase ), sequence_length) , _UpperCAmelCase )
for i, tensor in enumerate(_UpperCAmelCase ):
if padding_side == "right":
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a = tensor[:sequence_length]
else:
__a = tensor[:sequence_length]
else:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a = tensor[:sequence_length]
else:
__a = tensor[:sequence_length]
return out_tensor.tolist()
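# --- Hedged illustration (not from the original file) ---
# The helper above is `padding_tensor` (the name the collator below calls):
# assuming the de-obfuscated original, it pads every row to sequence_length
# with padding_value on the requested side, e.g.
# padding_tensor([np.array([1, 2, 3]), np.array([4])], -1, "right", 5)
# -> [[1, 2, 3, -1, -1], [4, -1, -1, -1, -1]]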
def __snake_case ( _UpperCAmelCase ):
__a = ord(_UpperCAmelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
__a = unicodedata.category(_UpperCAmelCase )
if cat.startswith('''P''' ):
return True
return False
@dataclass
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : PreTrainedTokenizerBase
UpperCamelCase__ : Union[bool, str, PaddingStrategy] = True
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : int = -100
UpperCamelCase__ : str = "pt"
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
import torch
__a = '''label''' if '''label''' in features[0].keys() else '''labels'''
__a = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
__a = self.tokenizer.pad(
__SCREAMING_SNAKE_CASE , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
if labels is None:
return batch
__a = torch.tensor(batch['''entity_ids''']).shape[1]
__a = self.tokenizer.padding_side
if padding_side == "right":
__a = [
list(__SCREAMING_SNAKE_CASE) + [self.label_pad_token_id] * (sequence_length - len(__SCREAMING_SNAKE_CASE)) for label in labels
]
else:
__a = [
[self.label_pad_token_id] * (sequence_length - len(__SCREAMING_SNAKE_CASE)) + list(__SCREAMING_SNAKE_CASE) for label in labels
]
__a = [feature['''ner_tags'''] for feature in features]
__a = padding_tensor(__SCREAMING_SNAKE_CASE , -1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = [feature['''original_entity_spans'''] for feature in features]
__a = padding_tensor(__SCREAMING_SNAKE_CASE , (-1, -1) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
        __a = {k: torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.int64) for k, v in batch.items()}
return batch
| 49
| 0
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : List[str] = logging.get_logger(__name__)
lowercase : int = {
'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json',
'BridgeTower/bridgetower-base-itm-mlm': (
'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'
),
}
class A ( __snake_case ):
__magic_name__ = '''bridgetower_vision_model'''
def __init__( self , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=288 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=1e-05 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , **SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
A : List[str] = hidden_size
A : Optional[int] = num_hidden_layers
A : Dict = num_channels
A : Tuple = patch_size
A : Optional[Any] = image_size
A : Dict = initializer_factor
A : List[Any] = layer_norm_eps
A : Optional[int] = stop_gradient
A : Optional[Any] = share_layernorm
A : Optional[int] = remove_last_layer
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig":
"""simple docstring"""
        A , A = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if config_dict.get('''model_type''' ) == "bridgetower":
A : List[Any] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
class A ( __snake_case ):
__magic_name__ = '''bridgetower_text_model'''
def __init__( self , SCREAMING_SNAKE_CASE=50265 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=514 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=1e-05 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
A : List[Any] = vocab_size
A : int = hidden_size
A : List[Any] = num_hidden_layers
A : Any = num_attention_heads
A : Any = hidden_act
A : Union[str, Any] = initializer_factor
A : Dict = intermediate_size
A : Dict = hidden_dropout_prob
A : List[str] = attention_probs_dropout_prob
A : Any = max_position_embeddings
A : int = type_vocab_size
A : List[Any] = layer_norm_eps
A : List[Any] = position_embedding_type
A : List[Any] = use_cache
A : List[str] = pad_token_id
A : Dict = bos_token_id
A : Tuple = eos_token_id
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig":
"""simple docstring"""
        A , A = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if config_dict.get('''model_type''' ) == "bridgetower":
A : List[Any] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
class A ( __snake_case ):
__magic_name__ = '''bridgetower'''
def __init__( self , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=1e-05 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="add" , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=6 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Tuple:
"""simple docstring"""
A : Union[str, Any] = kwargs.pop('''text_config_dict''' , SCREAMING_SNAKE_CASE )
A : Dict = kwargs.pop('''vision_config_dict''' , SCREAMING_SNAKE_CASE )
super().__init__(**SCREAMING_SNAKE_CASE )
A : int = share_cross_modal_transformer_layers
A : str = hidden_act
A : List[Any] = hidden_size
A : List[Any] = initializer_factor
A : List[Any] = layer_norm_eps
A : Optional[Any] = share_link_tower_layers
A : Optional[Any] = link_tower_type
A : Union[str, Any] = num_attention_heads
A : str = num_hidden_layers
A : int = tie_word_embeddings
A : Optional[Any] = init_layernorm_from_vision_encoder
if text_config is None:
A : Any = {}
logger.info('''`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.''' )
if vision_config is None:
A : List[str] = {}
logger.info('''`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.''' )
A : Union[str, Any] = BridgeTowerTextConfig(**SCREAMING_SNAKE_CASE )
A : Dict = BridgeTowerVisionConfig(**SCREAMING_SNAKE_CASE )
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : List[str] = copy.deepcopy(self.__dict__ )
A : List[str] = self.text_config.to_dict()
A : Tuple = self.vision_config.to_dict()
A : Any = self.__class__.model_type
return output
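# --- Hedged sketch (not from the original file) ---
# These classes correspond to transformers' BridgeTowerVisionConfig,
# BridgeTowerTextConfig and BridgeTowerConfig; the classmethod above is
# `from_text_vision_configs`, which composes the two sub-configs.
from transformers import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig
bridgetower_config = BridgeTowerConfig.from_text_vision_configs(
    BridgeTowerTextConfig() , BridgeTowerVisionConfig()
)
assert bridgetower_config.text_config.hidden_size == 768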
| 3
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def __snake_case ( ):
__a , __a = 9, 14 # noqa: F841
__a = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
__a = defaultdict(_UpperCAmelCase )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
__a = mst(_UpperCAmelCase )
__a = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
__a = tuple(answer[:2] )
__a = tuple(edge[::-1] )
assert edge in result or reverse in result
| 49
| 0
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""Intel/dpt-large""": """https://huggingface.co/Intel/dpt-large/resolve/main/config.json""",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : List[Any] = '''dpt'''
def __init__( self : int , UpperCAmelCase__ : List[Any]=7_6_8 , UpperCAmelCase__ : Optional[Any]=1_2 , UpperCAmelCase__ : str=1_2 , UpperCAmelCase__ : List[str]=3_0_7_2 , UpperCAmelCase__ : Dict="gelu" , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : List[Any]=1E-12 , UpperCAmelCase__ : List[str]=3_8_4 , UpperCAmelCase__ : int=1_6 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : List[str]=False , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str=[2, 5, 8, 1_1] , UpperCAmelCase__ : Union[str, Any]="project" , UpperCAmelCase__ : List[Any]=[4, 2, 1, 0.5] , UpperCAmelCase__ : List[Any]=[9_6, 1_9_2, 3_8_4, 7_6_8] , UpperCAmelCase__ : Any=2_5_6 , UpperCAmelCase__ : Optional[Any]=-1 , UpperCAmelCase__ : List[str]=False , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Dict=0.4 , UpperCAmelCase__ : Any=2_5_5 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Dict=[1, 1_0_2_4, 2_4, 2_4] , UpperCAmelCase__ : Any=[0, 1] , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : str , ) -> int:
super().__init__(**UpperCAmelCase__ )
lowerCAmelCase = hidden_size
lowerCAmelCase = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('Initializing the config with a `BiT` backbone.' )
lowerCAmelCase = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
}
lowerCAmelCase = BitConfig(**UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
logger.info('Initializing the config with a `BiT` backbone.' )
lowerCAmelCase = BitConfig(**UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase = backbone_config
else:
raise ValueError(
F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
lowerCAmelCase = backbone_featmap_shape
lowerCAmelCase = neck_ignore_stages
if readout_type != "project":
raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' )
else:
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = []
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = qkv_bias
lowerCAmelCase = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' )
lowerCAmelCase = readout_type
lowerCAmelCase = reassemble_factors
lowerCAmelCase = neck_hidden_sizes
lowerCAmelCase = fusion_hidden_size
lowerCAmelCase = head_in_index
lowerCAmelCase = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
lowerCAmelCase = use_auxiliary_head
lowerCAmelCase = auxiliary_loss_weight
lowerCAmelCase = semantic_loss_ignore_index
lowerCAmelCase = semantic_classifier_dropout
def __UpperCAmelCase ( self : str ) -> Dict:
lowerCAmelCase = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase = self.backbone_config.to_dict()
lowerCAmelCase = self.__class__.model_type
return output
| 4
|
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # Removed: 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
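# Hedged sketch of the semantics the tests above assert: every torch (.bin)
# weight in a pipeline repo needs a .safetensors counterpart for the requested
# variant, otherwise the helper returns False.
if __name__ == "__main__":
    print(is_safetensors_compatible(["unet/diffusion_pytorch_model.bin"]))  # False: no safetensors twin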
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    """A second lock on the same file must time out while the first is held."""
    lock_1 = FileLock(str(tmpdir / "foo.lock"))
    lock_2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock_1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock_2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    """Overlong lock filenames are truncated to the OS limit but keep the .lock suffix."""
    filename = "a" * 1000 + ".lock"
    lock_1 = FileLock(str(tmpdir / filename))
    assert lock_1._lock_file.endswith(".lock")
    assert not lock_1._lock_file.endswith(filename)
    assert len(os.path.basename(lock_1._lock_file)) <= 255
    lock_2 = FileLock(tmpdir / filename)
    with lock_1.acquire():
        with pytest.raises(Timeout):
            lock_2.acquire(0)
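# A minimal standalone sketch of the behavior exercised above: a zero-timeout
# acquire on an already-held lock raises Timeout immediately.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, "demo.lock")
        with FileLock(path).acquire():
            try:
                FileLock(path).acquire(0)
            except Timeout:
                print("second acquire timed out, as expected")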
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        # `sas_*` is this dump's spelling of the upstream `s2s_*` (seq2seq) names;
        # it is kept so the UI code below keeps resolving.
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    _, indices = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in indices[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # `support_list` is the module-level list set by the UI code below, as in
    # the original app.
    return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
__snake_case :Dict = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
__snake_case :int = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__snake_case :int = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
__snake_case :Union[str, Any] = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
__snake_case :int = st.sidebar.checkbox('''Demo options''')
if demo_options:
__snake_case :str = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
__snake_case :Tuple = action_list.index(action_st)
__snake_case :Optional[int] = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
__snake_case :Dict = show_type == '''Show full text of passages'''
else:
__snake_case :Dict = 3
__snake_case :str = True
__snake_case :Optional[Any] = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
__snake_case :List[str] = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
__snake_case :Dict = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
__snake_case :Optional[int] = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
__snake_case :Optional[int] = '''wiki40b'''
__snake_case :Dict = '''dense'''
__snake_case :Dict = '''beam'''
__snake_case :int = 2
__snake_case :str = 64
__snake_case :Tuple = 256
__snake_case :int = None
__snake_case :List[Any] = None
__snake_case :int = st.sidebar.checkbox('''Generation options''')
if generate_options:
__snake_case :Tuple = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
__snake_case :Tuple = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
__snake_case :Dict = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__snake_case :Dict = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__snake_case :List[str] = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__snake_case :Tuple = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
__snake_case :Any = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
__snake_case :Any = None
# start main text
__snake_case :Dict = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
__snake_case :int = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__snake_case :Optional[int] = st.text_input('''Enter your question here:''', '''''')
else:
__snake_case :Optional[int] = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
__snake_case ,__snake_case :int = make_support(question, source=wiki_source, method='''dense''', n_results=10)
__snake_case ,__snake_case :Optional[int] = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
__snake_case :Optional[Any] = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__snake_case :Union[str, Any] = support_list[:10]
__snake_case :Optional[int] = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
__snake_case ,__snake_case :Tuple = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__snake_case ,__snake_case :Optional[int] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
__snake_case :Dict = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
__snake_case :int = res[1].strip()
if sec_titles == "":
__snake_case :List[Any] = '''[{}]({})'''.format(res[0], wiki_url)
else:
__snake_case :Optional[int] = sec_titles.split(''' & ''')
__snake_case :str = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
__snake_case :str = find_nearest_training(question)
__snake_case :str = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
__snake_case :Optional[Any] = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
__snake_case :Tuple = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
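# To launch the demo above (assuming this script is saved as eli5_app.py and
# the precomputed .dat index files referenced in load_indexes()/load_train_data()
# are present in the working directory):
#
#   streamlit run eli5_app.py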
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
A : List[Any] = False
@skip_mps
class __A( a , a , a , unittest.TestCase ):
snake_case_ = StableDiffusionAttendAndExcitePipeline
snake_case_ = False
snake_case_ = TEXT_TO_IMAGE_PARAMS
snake_case_ = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
snake_case_ = TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case_ = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> List[str]:
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(_snake_case )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> int:
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
__a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_snake_case , )
__a = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
torch.manual_seed(0 )
__a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
__a = CLIPTextModel(_snake_case )
__a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__a = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=0 ) -> Any:
'''simple docstring'''
if str(_snake_case ).startswith('''mps''' ):
__a = torch.manual_seed(_snake_case )
else:
__a = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
__a = __a = {
'''prompt''': '''a cat and a frog''',
'''token_indices''': [2, 5],
'''generator''': generator,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''max_iter_to_alter''': 2,
'''thresholds''': {0: 0.7},
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
__a = '''cpu'''
__a = self.get_dummy_components()
__a = self.pipeline_class(**_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
__a = self.get_dummy_inputs(_snake_case )
__a = pipe(**_snake_case ).images
__a = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
__a = np.array(
[0.6390_5364, 0.6289_7307, 0.4859_9017, 0.513_3624, 0.555_0048, 0.4576_9516, 0.5032_6973, 0.502_3139, 0.4538_4496] )
__a = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_snake_case , 1E-3 )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
super().test_save_load_local(expected_max_difference=5E-4 )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class __A( unittest.TestCase ):
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> Dict:
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(_snake_case )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> Union[str, Any]:
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = torch.manual_seed(51 )
__a = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , safety_checker=_snake_case , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
__a = '''a painting of an elephant with glasses'''
__a = [5, 7]
__a = pipe(
prompt=_snake_case , token_indices=_snake_case , guidance_scale=7.5 , generator=_snake_case , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0]
__a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' )
assert np.abs((expected_image - image).max() ) < 5E-1
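# Hedged usage sketch mirroring the slow test above (checkpoint, prompt, and
# token indices come from that test; `token_indices` marks the prompt tokens
# whose cross-attention the pipeline amplifies during denoising):
#
#   pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", safety_checker=None
#   ).to("cuda")
#   image = pipe(
#       prompt="a painting of an elephant with glasses",
#       token_indices=[5, 7],
#       guidance_scale=7.5,
#       num_inference_steps=50,
#   ).images[0]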
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ):
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
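# Hedged usage sketch for the trainer above (the model/dataset objects and the
# post-processing hook are placeholders; the hook maps generated token ids back
# to answer strings before metrics are computed):
#
#   trainer = QuestionAnsweringSeq2SeqTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate(max_length=64, num_beams=4, metric_key_prefix="eval")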
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
lowercase_ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
lowercase_ = 128022
lowercase_ = 128028
@require_sentencepiece
class A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = MaMaaaTokenizer
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = True
def snake_case__ ( self : Tuple )-> Dict:
'''simple docstring'''
super().setUp()
A__ = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
A__ = dict(zip(lowercase_,range(len(lowercase_ ) ) ) )
A__ = Path(self.tmpdirname )
save_json(lowercase_,save_dir / VOCAB_FILES_NAMES['vocab_file'] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(lowercase_,save_dir / VOCAB_FILES_NAMES['spm_file'] )
A__ = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self : Tuple,**lowercase_ : Any )-> Any:
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname,**lowercase_ )
def snake_case__ ( self : Dict,lowercase_ : List[Any] )-> List[str]:
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def snake_case__ ( self : Tuple )-> Optional[Any]:
'''simple docstring'''
A__ = '</s>'
A__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ),lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ),lowercase_ )
def snake_case__ ( self : Any )-> Optional[Any]:
'''simple docstring'''
A__ = self.get_tokenizer()
A__ = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0],'</s>' )
self.assertEqual(vocab_keys[1],'<unk>' )
self.assertEqual(vocab_keys[-1],'<s>' )
self.assertEqual(len(lowercase_ ),tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('Skip this test while all models are still to be uploaded.' )
def snake_case__ ( self : str )-> str:
'''simple docstring'''
pass
def snake_case__ ( self : List[Any] )-> Tuple:
'''simple docstring'''
A__ = self.get_tokenizer()
A__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase_,['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ),[2, 3, 4, 5, 6],)
A__ = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(lowercase_,['▁This', '▁is', '▁a', '▁t', 'est'] )
A__ = tokenizer.convert_tokens_to_string(lowercase_ )
self.assertEqual(lowercase_,'This is a test' )
@slow
def snake_case__ ( self : Dict )-> Union[str, Any]:
'''simple docstring'''
A__ = {'input_ids': [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_,model_name='facebook/m2m100_418M',revision='c168bae485c864188cf9aa0e4108b0b6934dc91e',)
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = 'facebook/m2m100_418M'
lowerCamelCase = [
'In my opinion, there are two levels of response from the French government.',
'NSA Affair Emphasizes Complete Lack of Debate on Intelligence',
]
lowerCamelCase = [
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
]
# fmt: off
lowerCamelCase = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]
@classmethod
def snake_case__ ( cls : Optional[Any] )-> Optional[Any]:
'''simple docstring'''
A__ = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name,src_lang='en',tgt_lang='fr' )
A__ = 1
return cls
def snake_case__ ( self : Union[str, Any] )-> List[str]:
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id('ar' ),1_2_8_0_0_6 )
self.assertEqual(self.tokenizer.get_lang_id('en' ),1_2_8_0_2_2 )
self.assertEqual(self.tokenizer.get_lang_id('ro' ),1_2_8_0_7_6 )
self.assertEqual(self.tokenizer.get_lang_id('mr' ),1_2_8_0_6_3 )
def snake_case__ ( self : Any )-> Optional[int]:
'''simple docstring'''
A__ = self.tokenizer.get_vocab()
self.assertEqual(len(lowercase_ ),self.tokenizer.vocab_size )
self.assertEqual(vocab['<unk>'],3 )
self.assertIn(self.tokenizer.get_lang_token('en' ),lowercase_ )
def snake_case__ ( self : Union[str, Any] )-> int:
'''simple docstring'''
A__ = 'en'
A__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens,lowercase_ )
def snake_case__ ( self : str )-> Tuple:
'''simple docstring'''
self.assertIn(lowercase_,self.tokenizer.all_special_ids )
# fmt: off
A__ = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
# fmt: on
A__ = self.tokenizer.decode(lowercase_,skip_special_tokens=lowercase_ )
A__ = self.tokenizer.decode(generated_ids[1:],skip_special_tokens=lowercase_ )
self.assertEqual(lowercase_,lowercase_ )
self.assertNotIn(self.tokenizer.eos_token,lowercase_ )
def snake_case__ ( self : List[str] )-> int:
'''simple docstring'''
A__ = tempfile.mkdtemp()
A__ = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(lowercase_ )
A__ = MaMaaaTokenizer.from_pretrained(lowercase_ )
self.assertDictEqual(new_tok.lang_token_to_id,lowercase_ )
@require_torch
def snake_case__ ( self : List[Any] )-> List[Any]:
'''simple docstring'''
A__ = 'en'
A__ = 'fr'
A__ = self.tokenizer(self.src_text,text_target=self.tgt_text,padding=lowercase_,return_tensors='pt' )
A__ = shift_tokens_right(
batch['labels'],self.tokenizer.pad_token_id,self.tokenizer.eos_token_id )
for k in batch:
A__ = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def snake_case__ ( self : Optional[Any] )-> Union[str, Any]:
'''simple docstring'''
A__ = 'mr'
self.assertListEqual(self.tokenizer.prefix_tokens,[self.tokenizer.get_lang_id('mr' )] )
self.assertListEqual(self.tokenizer.suffix_tokens,[self.tokenizer.eos_token_id] )
A__ = 'zh'
self.assertListEqual(self.tokenizer.prefix_tokens,[self.tokenizer.get_lang_id('zh' )] )
self.assertListEqual(self.tokenizer.suffix_tokens,[self.tokenizer.eos_token_id] )
@require_torch
def snake_case__ ( self : Optional[Any] )-> List[str]:
'''simple docstring'''
A__ = 'mr'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens,[self.tokenizer.get_lang_id('mr' )] )
self.assertListEqual(self.tokenizer.suffix_tokens,[self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens,[self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
A__ = 'zh'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens,[self.tokenizer.get_lang_id('zh' )] )
self.assertListEqual(self.tokenizer.suffix_tokens,[self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens,[self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def snake_case__ ( self : Union[str, Any] )-> Any:
'''simple docstring'''
A__ = self.tokenizer._build_translation_inputs('A test',return_tensors='pt',src_lang='en',tgt_lang='ar' )
self.assertEqual(
nested_simplify(lowercase_ ),{
# en_XX, A, test, EOS
'input_ids': [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 1_2_8_0_0_6,
},)
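# Hedged translation sketch for the checkpoint exercised above. `MaMaaaTokenizer`
# in this file corresponds to the upstream `M2M100Tokenizer`; the snippet below
# uses the upstream names and the facebook/m2m100_418M weights:
#
#   from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
#
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
#   batch = tokenizer("In my opinion, there are two levels of response.", return_tensors="pt")
#   generated = model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id("fr"))
#   print(tokenizer.batch_decode(generated, skip_special_tokens=True))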
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (reverse Polish) expression, truncating division toward zero."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Python's // floors; adjust so the result truncates toward zero.
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
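# Worked example for the evaluator above: ["2", "1", "+", "3", "*"] is the
# postfix form of (2 + 1) * 3, so this prints 9.
if __name__ == "__main__":
    print(evaluate_postfix(["2", "1", "+", "3", "*"]))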
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    """Wraps a Wav2Vec2 feature extractor and a CTC tokenizer into a single processor."""

    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
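# Hedged usage sketch for the processor above (the checkpoint name and the
# dummy audio array are illustrative):
#
#   import numpy as np
#
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   audio = np.zeros(16_000, dtype=np.float32)  # one second of silence
#   inputs = processor(audio=audio, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="HELLO WORLD").input_ids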
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__snake_case :Optional[int] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__snake_case :List[str] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__snake_case :List[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item: +1 for every character matching the target at that position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Replace one random gene of the child with probability MUTATION_PROBABILITY."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Breed and mutate children of parent_1, proportionally to its fitness score."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
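# Quick illustration of the helpers above (outputs are random, so this is a
# sketch rather than a doctest): with random_slice == 2,
# crossover("abcd", "wxyz") returns ("abyz", "wxcd"), and mutate("abcd", genes)
# rewrites at most one character with probability MUTATION_PROBABILITY.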
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        raise ValueError(f"{N_POPULATION} must be bigger than {N_SELECTED}")
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        raise ValueError(f"{not_in_genes_list} is not in genes list, evolution cannot converge")

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
__snake_case :Optional[int] = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__snake_case :List[Any] = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__snake_case ,__snake_case ,__snake_case :Dict = basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
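# Worked example for the function above: the longest increasing subsequence
# (ties allowed) of [10, 22, 9, 33, 21, 50, 41, 60, 22] is [10, 22, 33, 41, 60].
if __name__ == "__main__":
    print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 22]))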
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__snake_case :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__snake_case :Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
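# Example invocation (the script filename and paths are placeholders):
#
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/bert_model.ckpt \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin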
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
__A = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
for attribute in key.split("." ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
lowerCamelCase__: Optional[int] ="lm_head"
lowerCamelCase__: Dict =getattr(__a , __a )
if weight_type is not None:
lowerCamelCase__: str =getattr(__a , __a ).shape
else:
lowerCamelCase__: int =hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
lowerCamelCase__: Dict =value
elif weight_type == "weight_g":
lowerCamelCase__: Optional[Any] =value
elif weight_type == "weight_v":
lowerCamelCase__: int =value
elif weight_type == "bias":
lowerCamelCase__: List[str] =value
else:
lowerCamelCase__: Union[str, Any] =value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: List[Any] =[]
lowerCamelCase__: List[str] =fairseq_model.state_dict()
lowerCamelCase__: Optional[int] =hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase__: int =False
if "conv_layers" in name:
load_conv_layer(
__a , __a , __a , __a , hf_model.config.feat_extract_norm == "group" , )
lowerCamelCase__: str =True
else:
for key, mapped_key in MAPPING.items():
lowerCamelCase__: List[str] ="unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowerCamelCase__: Optional[Any] =True
if "*" in mapped_key:
lowerCamelCase__: Optional[Any] =name.split(__a )[0].split("." )[-2]
lowerCamelCase__: List[str] =mapped_key.replace("*" , __a )
if "weight_g" in name:
lowerCamelCase__: List[str] ="weight_g"
elif "weight_v" in name:
lowerCamelCase__: Union[str, Any] ="weight_v"
elif "bias" in name:
lowerCamelCase__: Dict ="bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCamelCase__: Tuple ="weight"
else:
lowerCamelCase__: List[Any] =None
set_recursively(__a , __a , __a , __a , __a , __a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCAmelCase_ ( __a , __a , __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__: Tuple =full_name.split("conv_layers." )[-1]
lowerCamelCase__: List[str] =name.split("." )
lowerCamelCase__: str =int(items[0] )
lowerCamelCase__: Union[str, Any] =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowerCamelCase__: List[str] =value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowerCamelCase__: Dict =value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowerCamelCase__: List[Any] =value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowerCamelCase__: List[str] =value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def lowerCAmelCase_ ( __a , __a , __a=None , __a=None , __a=True ) -> int:
"""simple docstring"""
if config_path is not None:
lowerCamelCase__: str =UniSpeechConfig.from_pretrained(__a )
else:
lowerCamelCase__: List[Any] =UniSpeechConfig()
if is_finetuned:
if dict_path:
lowerCamelCase__: str =Dictionary.load_from_json(__a )
            # Important: change the bos & pad token ids, since the CTC blank symbol
            # is <pad> and not <s> as in fairseq.
lowerCamelCase__: Any =target_dict.pad_index
lowerCamelCase__: int =target_dict.bos_index
lowerCamelCase__: Any =target_dict.eos_index
lowerCamelCase__: Dict =len(target_dict.symbols )
lowerCamelCase__: Optional[int] =os.path.join(__a , "vocab.json" )
if not os.path.isdir(__a ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(__a ) )
return
os.makedirs(__a , exist_ok=__a )
lowerCamelCase__: Optional[Any] =target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCamelCase__: Optional[Any] =42
lowerCamelCase__: List[Any] =43
with open(__a , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(__a , __a )
lowerCamelCase__: List[str] =WavaVecaPhonemeCTCTokenizer(
__a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=__a , )
lowerCamelCase__: Dict =True if config.feat_extract_norm == "layer" else False
lowerCamelCase__: Tuple =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
lowerCamelCase__: List[Any] =WavaVecaProcessor(feature_extractor=__a , tokenizer=__a )
processor.save_pretrained(__a )
lowerCamelCase__: int =UniSpeechForCTC(__a )
else:
lowerCamelCase__: int =UniSpeechForPreTraining(__a )
if is_finetuned:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} )
else:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Tuple =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
lowerCamelCase__: List[str] =model[0].eval()
recursively_load_weights(__a , __a , __a )
hf_unispeech.save_pretrained(__a )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
__A = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
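# Example invocation (script name and paths are illustrative, not from the source):
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path /path/to/unispeech.pt \
#       --pytorch_dump_folder_path ./unispeech-hf \
#       --dict_path /path/to/dict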
| 10
|
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def __snake_case ( _UpperCAmelCase = "isbn/0140328726" ):
__a = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('''/''' ) != 1:
__a = f'{olid} is not a valid Open Library olid'
raise ValueError(_UpperCAmelCase )
return requests.get(f'https://openlibrary.org/{new_olid}.json' ).json()
def __snake_case ( _UpperCAmelCase ):
__a = {
'''title''': '''Title''',
'''publish_date''': '''Publish date''',
'''authors''': '''Authors''',
        '''number_of_pages''': '''Number of pages''',
'''first_sentence''': '''First sentence''',
'''isbn_10''': '''ISBN (10)''',
'''isbn_13''': '''ISBN (13)''',
}
__a = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
__a = [
get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
]
__a = data['''First sentence''']['''value''']
for key, value in data.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a = ''', '''.join(_UpperCAmelCase )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
__snake_case :List[Any] = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(f'\nSearching Open Library for ISBN: {isbn}...\n')
try:
__snake_case :Optional[Any] = summarize_book(get_openlibrary_data(f'isbn/{isbn}'))
print('''\n'''.join(f'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'Sorry, there are no results for ISBN: {isbn}.')
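# Comment-only usage sketch (needs network access; the expected title assumes the
# Open Library record for this ISBN is unchanged):
# >>> get_openlibrary_data("isbn/0140328726")["title"]  # doctest: +SKIP
# 'Matilda'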
| 49
| 0
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(a)
class lowerCAmelCase__ ( a):
'''simple docstring'''
def __init__( self , *__lowerCamelCase , **__lowerCamelCase) -> List[str]:
super().__init__(*__lowerCamelCase , **__lowerCamelCase)
requires_backends(self , "vision")
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING)
def _lowerCamelCase ( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None) -> Dict:
_A : Optional[int] = {}
_A : Tuple = {}
if prompt is not None:
_A : Dict = prompt
if generate_kwargs is not None:
_A : Union[str, Any] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_A : int = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
" please use only one")
_A : Dict = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , __lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
return super().__call__(__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase=None) -> Optional[Any]:
_A : List[Any] = load_image(__lowerCamelCase)
if prompt is not None:
if not isinstance(__lowerCamelCase , __lowerCamelCase):
raise ValueError(
F"Received an invalid text input, got - {type(__lowerCamelCase)} - but expected a single string. "
"Note also that one single text can be provided for conditional image to text generation.")
_A : Optional[Any] = self.model.config.model_type
if model_type == "git":
_A : List[Any] = self.image_processor(images=__lowerCamelCase , return_tensors=self.framework)
_A : Optional[int] = self.tokenizer(text=__lowerCamelCase , add_special_tokens=__lowerCamelCase).input_ids
_A : Any = [self.tokenizer.cls_token_id] + input_ids
_A : Any = torch.tensor(__lowerCamelCase).unsqueeze(0)
model_inputs.update({"input_ids": input_ids})
elif model_type == "pix2struct":
_A : List[str] = self.image_processor(images=__lowerCamelCase , header_text=__lowerCamelCase , return_tensors=self.framework)
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_A : Optional[int] = self.image_processor(images=__lowerCamelCase , return_tensors=self.framework)
_A : Dict = self.tokenizer(__lowerCamelCase , return_tensors=self.framework)
model_inputs.update(__lowerCamelCase)
else:
raise ValueError(F"Model type {model_type} does not support conditional text generation")
else:
_A : Any = self.image_processor(images=__lowerCamelCase , return_tensors=self.framework)
if self.model.config.model_type == "git" and prompt is None:
_A : List[Any] = None
return model_inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase=None) -> Dict:
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
        # pipeline will group them into a list of `None`, which fails `_forward`. Avoid this by checking for it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["input_ids"] , __lowerCamelCase)
and all(x is None for x in model_inputs["input_ids"])
):
_A : Tuple = None
if generate_kwargs is None:
_A : List[Any] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_A : List[str] = model_inputs.pop(self.model.main_input_name)
_A : int = self.model.generate(__lowerCamelCase , **__lowerCamelCase , **__lowerCamelCase)
return model_outputs
def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]:
_A : List[str] = []
for output_ids in model_outputs:
_A : Any = {
"generated_text": self.tokenizer.decode(
__lowerCamelCase , skip_special_tokens=__lowerCamelCase , )
}
records.append(__lowerCamelCase)
return records
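# Comment-only usage sketch for this image-to-text pipeline; the checkpoint name
# is one public example, not something this module requires, and the generated
# text varies by model and image:
# >>> from transformers import pipeline
# >>> captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")  # doctest: +SKIP
# >>> captioner("photo.jpg")  # doctest: +SKIP
# [{'generated_text': '...'}]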
| 11
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _A ( __UpperCAmelCase ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : NestedDataStructureLike[PathLike] , __SCREAMING_SNAKE_CASE : Optional[NamedSplit] = None , __SCREAMING_SNAKE_CASE : Optional[Features] = None , __SCREAMING_SNAKE_CASE : str = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[int] = None , **__SCREAMING_SNAKE_CASE : List[str] , ):
'''simple docstring'''
super().__init__(
__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , streaming=__SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__a = path_or_paths if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else {self.split: path_or_paths}
__a = Text(
cache_dir=__SCREAMING_SNAKE_CASE , data_files=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
if self.streaming:
__a = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
__a = None
__a = None
__a = None
__a = None
self.builder.download_and_prepare(
download_config=__SCREAMING_SNAKE_CASE , download_mode=__SCREAMING_SNAKE_CASE , verification_mode=__SCREAMING_SNAKE_CASE , base_path=__SCREAMING_SNAKE_CASE , num_proc=self.num_proc , )
__a = self.builder.as_dataset(
split=self.split , verification_mode=__SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory)
return dataset
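# Comment-only usage sketch: this reader backs the "text" builder and is normally
# reached through load_dataset rather than instantiated directly (file name is
# illustrative):
# >>> from datasets import load_dataset
# >>> ds = load_dataset("text", data_files={"train": "corpus.txt"})["train"]  # doctest: +SKIP
# >>> ds[0]  # one line of the file per example, e.g. {'text': '...'}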
| 49
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict = 'deberta-v2'
def __init__( self: Optional[Any] , UpperCamelCase_: Union[str, Any]=12_81_00 , UpperCamelCase_: Optional[int]=15_36 , UpperCamelCase_: str=24 , UpperCamelCase_: Optional[Any]=24 , UpperCamelCase_: int=61_44 , UpperCamelCase_: Dict="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: List[Any]=5_12 , UpperCamelCase_: List[Any]=0 , UpperCamelCase_: Any=0.02 , UpperCamelCase_: Tuple=1E-7 , UpperCamelCase_: List[Any]=False , UpperCamelCase_: Any=-1 , UpperCamelCase_: Tuple=0 , UpperCamelCase_: str=True , UpperCamelCase_: Any=None , UpperCamelCase_: List[Any]=0 , UpperCamelCase_: str="gelu" , **UpperCamelCase_: Tuple , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = relative_attention
__lowerCamelCase = max_relative_positions
__lowerCamelCase = pad_token_id
__lowerCamelCase = position_biased_input
# Backwards compatibility
if type(UpperCamelCase_ ) == str:
__lowerCamelCase = [x.strip() for x in pos_att_type.lower().split("""|""" )]
__lowerCamelCase = pos_att_type
__lowerCamelCase = vocab_size
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = kwargs.get("""pooler_hidden_size""" , UpperCamelCase_ )
__lowerCamelCase = pooler_dropout
__lowerCamelCase = pooler_hidden_act
class lowerCamelCase__( __lowerCamelCase):
@property
def lowerCAmelCase__ ( self: int ):
if self.task == "multiple-choice":
__lowerCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__lowerCamelCase = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def lowerCAmelCase__ ( self: Any ):
return 12
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCamelCase_: int = -1 , UpperCamelCase_: int = -1 , UpperCamelCase_: int = -1 , UpperCamelCase_: bool = False , UpperCamelCase_: Optional["TensorType"] = None , UpperCamelCase_: int = 3 , UpperCamelCase_: int = 40 , UpperCamelCase_: int = 40 , UpperCamelCase_: "PreTrainedTokenizerBase" = None , ):
__lowerCamelCase = super().generate_dummy_inputs(preprocessor=UpperCamelCase_ , framework=UpperCamelCase_ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
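# Comment-only sketch of the ONNX-config behaviour above, using the public
# (non-obfuscated) class name; DeBERTa-v2 defaults to type_vocab_size=0, so
# generate_dummy_inputs drops token_type_ids:
# >>> from transformers import DebertaV2Config
# >>> DebertaV2Config().type_vocab_size
# 0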
| 12
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__snake_case :List[str] = '''\
Text data.
Second line of data.'''
__snake_case :Optional[Any] = '''file'''
@pytest.fixture(scope='''session''' )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
__a = bytes(_UpperCAmelCase , '''utf-8''' )
with zstd.open(_UpperCAmelCase , '''wb''' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture
def __snake_case ( _UpperCAmelCase ):
with open(os.path.join(tmpfs.local_root_dir , _UpperCAmelCase ) , '''w''' ) as f:
f.write(_UpperCAmelCase )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
__a = input_paths[compression_format]
__a = tmp_path / '''cache'''
__a = DownloadConfig(cache_dir=_UpperCAmelCase , extract_compressed_file=_UpperCAmelCase )
__a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase )
with open(_UpperCAmelCase ) as f:
__a = f.read()
with open(_UpperCAmelCase ) as f:
__a = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = '''custom_cache'''
__a = '''custom_extracted_dir'''
__a = tmp_path / '''custom_extracted_path'''
if default_extracted:
__a = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _UpperCAmelCase )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_UpperCAmelCase ) )
__a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
__a = xz_file
__a = (
DownloadConfig(extract_compressed_file=_UpperCAmelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_UpperCAmelCase )
)
__a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase )
assert Path(_UpperCAmelCase ).parent.parts[-2:] == expected
def __snake_case ( _UpperCAmelCase ):
# absolute path
__a = str(Path(_UpperCAmelCase ).resolve() )
assert cached_path(_UpperCAmelCase ) == text_file
# relative path
__a = str(Path(_UpperCAmelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_UpperCAmelCase ) == text_file
def __snake_case ( _UpperCAmelCase ):
# absolute path
__a = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(_UpperCAmelCase ):
cached_path(_UpperCAmelCase )
# relative path
__a = '''./__missing_file__.txt'''
with pytest.raises(_UpperCAmelCase ):
cached_path(_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = get_from_cache(f'tmp://{tmpfs_file}' )
with open(_UpperCAmelCase ) as f:
__a = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( ):
with pytest.raises(_UpperCAmelCase ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
http_get('''https://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
ftp_get('''ftp://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
fsspec_get('''s3://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
fsspec_head('''s3://huggingface.co''' )
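# The offline tests above patch datasets.config.HF_DATASETS_OFFLINE directly; the
# same behaviour is normally switched on from the environment (shell line is
# illustrative):
#   HF_DATASETS_OFFLINE=1 python my_script.py   # remote fetches raise OfflineModeIsEnabled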
| 49
| 0
|
from sklearn.metrics import recall_score
import datasets
lowerCAmelCase : Optional[int] = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
lowerCAmelCase : int = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`string` or `int`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
lowerCAmelCase : Tuple = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} }
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : List[str]):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32")),
"references": datasets.Sequence(datasets.Value("int32")),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32"),
"references": datasets.Value("int32"),
}) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : str=1 , lowerCAmelCase__ : List[str]="binary" , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : str="warn" , ):
SCREAMING_SNAKE_CASE_: List[str] = recall_score(
lowerCAmelCase__ , lowerCAmelCase__ , labels=lowerCAmelCase__ , pos_label=lowerCAmelCase__ , average=lowerCAmelCase__ , sample_weight=lowerCAmelCase__ , zero_division=lowerCAmelCase__ , )
return {"recall": float(lowerCAmelCase__) if score.size == 1 else score}
| 13
|
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Tuple = (DDPMParallelScheduler,)
def _lowerCamelCase ( self : int , **__SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**__SCREAMING_SNAKE_CASE)
return config
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str):
'''simple docstring'''
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_09_79)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = len(__SCREAMING_SNAKE_CASE)
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = self.dummy_sample_deter + 0.1
__a = self.dummy_sample_deter - 0.1
__a = samplea.shape[0]
__a = torch.stack([samplea, samplea, samplea] , dim=0)
__a = torch.arange(__SCREAMING_SNAKE_CASE)[0:3, None].repeat(1 , __SCREAMING_SNAKE_CASE)
__a = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
__a = scheduler.batch_step_no_noise(__SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1) , samples.flatten(0 , 1))
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 11_53.18_33) < 1E-2
assert abs(result_mean.item() - 0.50_05) < 1E-3
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = len(__SCREAMING_SNAKE_CASE)
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0)
for t in reversed(range(__SCREAMING_SNAKE_CASE)):
# 1. predict noise residual
__a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 2_58.96_06) < 1E-2
assert abs(result_mean.item() - 0.33_72) < 1E-3
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(prediction_type='''v_prediction''')
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = len(__SCREAMING_SNAKE_CASE)
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0)
for t in reversed(range(__SCREAMING_SNAKE_CASE)):
# 1. predict noise residual
__a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 2_02.02_96) < 1E-2
assert abs(result_mean.item() - 0.26_31) < 1E-3
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
__a = scheduler.timesteps
for i, timestep in enumerate(__SCREAMING_SNAKE_CASE):
if i == len(__SCREAMING_SNAKE_CASE) - 1:
__a = -1
else:
__a = timesteps[i + 1]
__a = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE)
__a = prev_t.item()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [100, 87, 50, 51, 0]
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''`custom_timesteps` must be in descending order.'''):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [100, 87, 50, 1, 0]
__a = len(__SCREAMING_SNAKE_CASE)
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''):
scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            __SCREAMING_SNAKE_CASE , msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
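# Shape sketch of the batched-step trick exercised above (sizes are illustrative;
# runnable on its own):
# import torch
# num_train_timesteps, batch = 10, 4
# timesteps = torch.arange(num_train_timesteps)[0:3, None].repeat(1, batch)  # (3, batch)
# samples = torch.randn(3, batch, 8, 8)                                      # three sample variants
# flat = samples.flatten(0, 1)  # (3 * batch, 8, 8): all variants denoised in one model call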
| 49
| 0
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = (DDIMParallelScheduler,)
UpperCAmelCase__ = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def SCREAMING_SNAKE_CASE ( self : str , **UpperCAmelCase__ : str) ->List[str]:
'''simple docstring'''
A__ = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**UpperCAmelCase__)
return config
def SCREAMING_SNAKE_CASE ( self : int , **UpperCAmelCase__ : List[Any]) ->Tuple:
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(**UpperCAmelCase__)
A__ = scheduler_class(**UpperCAmelCase__)
A__ , A__ = 10, 0.0
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase__)
for t in scheduler.timesteps:
A__ = model(UpperCAmelCase__ , UpperCAmelCase__)
A__ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__).prev_sample
return sample
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
'''simple docstring'''
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Tuple:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=UpperCAmelCase__)
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(steps_offset=1)
A__ = scheduler_class(**UpperCAmelCase__)
scheduler.set_timesteps(5)
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1]))
def SCREAMING_SNAKE_CASE ( self : str) ->int:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=UpperCAmelCase__ , beta_end=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->int:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[int]:
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Union[str, Any]:
'''simple docstring'''
self.check_over_configs(thresholding=UpperCAmelCase__)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase__ , prediction_type=UpperCAmelCase__ , sample_max_value=UpperCAmelCase__ , )
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]:
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500]):
self.check_over_forward(time_step=UpperCAmelCase__ , num_inference_steps=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[int]:
'''simple docstring'''
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0]):
self.check_over_forward(time_step=UpperCAmelCase__ , eta=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Dict:
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**UpperCAmelCase__)
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400) - 0.14771)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960) - 0.32460)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486) - 0.00979)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998) - 0.02)) < 1e-5
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict:
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**UpperCAmelCase__)
A__ , A__ = 10, 0.0
scheduler.set_timesteps(UpperCAmelCase__)
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
A__ = self.dummy_sample_deter + 0.1
A__ = self.dummy_sample_deter - 0.1
A__ = samplea.shape[0]
A__ = torch.stack([samplea, samplea, samplea] , dim=0)
A__ = torch.arange(UpperCAmelCase__)[0:3, None].repeat(1 , UpperCAmelCase__)
A__ = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
A__ = scheduler.batch_step_no_noise(UpperCAmelCase__ , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , UpperCAmelCase__)
A__ = torch.sum(torch.abs(UpperCAmelCase__))
A__ = torch.mean(torch.abs(UpperCAmelCase__))
assert abs(result_sum.item() - 1147.7904) < 1e-2
assert abs(result_mean.item() - 0.4982) < 1e-3
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
'''simple docstring'''
A__ = self.full_loop()
A__ = torch.sum(torch.abs(UpperCAmelCase__))
A__ = torch.mean(torch.abs(UpperCAmelCase__))
assert abs(result_sum.item() - 172.0067) < 1e-2
assert abs(result_mean.item() - 0.223967) < 1e-3
def SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
'''simple docstring'''
A__ = self.full_loop(prediction_type='''v_prediction''')
A__ = torch.sum(torch.abs(UpperCAmelCase__))
A__ = torch.mean(torch.abs(UpperCAmelCase__))
assert abs(result_sum.item() - 52.5302) < 1e-2
assert abs(result_mean.item() - 0.0684) < 1e-3
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
'''simple docstring'''
A__ = self.full_loop(set_alpha_to_one=UpperCAmelCase__ , beta_start=0.01)
A__ = torch.sum(torch.abs(UpperCAmelCase__))
A__ = torch.mean(torch.abs(UpperCAmelCase__))
assert abs(result_sum.item() - 149.8295) < 1e-2
assert abs(result_mean.item() - 0.1951) < 1e-3
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[int]:
'''simple docstring'''
A__ = self.full_loop(set_alpha_to_one=UpperCAmelCase__ , beta_start=0.01)
A__ = torch.sum(torch.abs(UpperCAmelCase__))
A__ = torch.mean(torch.abs(UpperCAmelCase__))
assert abs(result_sum.item() - 149.0784) < 1e-2
assert abs(result_mean.item() - 0.1941) < 1e-3
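# For reference, the _get_variance(t, prev_t) values asserted above follow the
# DDIM variance (Song et al., 2020):
#   var = (1 - alpha_bar_prev) / (1 - alpha_bar_t) * (1 - alpha_bar_t / alpha_bar_prev)
# eta scales sqrt(var) at each step; eta = 0 recovers the deterministic sampler.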
| 14
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__snake_case :List[Any] = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _A ( __UpperCAmelCase ):
def __init__( self : Dict , **__SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE)
requires_backends(self , '''vision''')
requires_backends(self , '''torch''')
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.')
self.check_model_type(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any , **__SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
__a = {}
__a = {}
__a = {}
# preprocess args
if "points_per_batch" in kwargs:
__a = kwargs['''points_per_batch''']
if "points_per_crop" in kwargs:
__a = kwargs['''points_per_crop''']
if "crops_n_layers" in kwargs:
__a = kwargs['''crops_n_layers''']
if "crop_overlap_ratio" in kwargs:
__a = kwargs['''crop_overlap_ratio''']
if "crop_n_points_downscale_factor" in kwargs:
__a = kwargs['''crop_n_points_downscale_factor''']
# postprocess args
if "pred_iou_thresh" in kwargs:
__a = kwargs['''pred_iou_thresh''']
if "stability_score_offset" in kwargs:
__a = kwargs['''stability_score_offset''']
if "mask_threshold" in kwargs:
__a = kwargs['''mask_threshold''']
if "stability_score_thresh" in kwargs:
__a = kwargs['''stability_score_thresh''']
if "crops_nms_thresh" in kwargs:
__a = kwargs['''crops_nms_thresh''']
if "output_rle_mask" in kwargs:
__a = kwargs['''output_rle_mask''']
if "output_bboxes_mask" in kwargs:
__a = kwargs['''output_bboxes_mask''']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , *__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Any=None , **__SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
return super().__call__(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , num_workers=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any=64 , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : float = 512 / 1_500 , __SCREAMING_SNAKE_CASE : Optional[int] = 32 , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , ):
'''simple docstring'''
__a = load_image(__SCREAMING_SNAKE_CASE)
__a = self.image_processor.size['''longest_edge''']
__a , __a , __a , __a = self.image_processor.generate_crop_boxes(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''')
with self.device_placement():
if self.framework == "pt":
__a = self.get_inference_context()
with inference_context():
__a = self._ensure_tensor_on_device(__SCREAMING_SNAKE_CASE , device=self.device)
__a = self.model.get_image_embeddings(model_inputs.pop('''pixel_values'''))
__a = image_embeddings
__a = grid_points.shape[1]
__a = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
                '''Cannot have points_per_batch<=0. Must be >=1 to return batched outputs. '''
'''To return all points at once, set points_per_batch to None''')
for i in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = grid_points[:, i : i + points_per_batch, :, :]
__a = input_labels[:, i : i + points_per_batch]
__a = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int=0.88 , __SCREAMING_SNAKE_CASE : List[Any]=0.95 , __SCREAMING_SNAKE_CASE : List[Any]=0 , __SCREAMING_SNAKE_CASE : int=1 , ):
'''simple docstring'''
__a = model_inputs.pop('''input_boxes''')
__a = model_inputs.pop('''is_last''')
__a = model_inputs.pop('''original_sizes''').tolist()
__a = model_inputs.pop('''reshaped_input_sizes''').tolist()
__a = self.model(**__SCREAMING_SNAKE_CASE)
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
__a = model_outputs['''pred_masks''']
__a = self.image_processor.post_process_masks(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , binarize=__SCREAMING_SNAKE_CASE)
__a = model_outputs['''iou_scores''']
__a , __a , __a = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : int=0.7 , ):
'''simple docstring'''
__a = []
__a = []
__a = []
for model_output in model_outputs:
all_scores.append(model_output.pop('''iou_scores'''))
all_masks.extend(model_output.pop('''masks'''))
all_boxes.append(model_output.pop('''boxes'''))
__a = torch.cat(__SCREAMING_SNAKE_CASE)
__a = torch.cat(__SCREAMING_SNAKE_CASE)
__a , __a , __a , __a = self.image_processor.post_process_for_mask_generation(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = defaultdict(__SCREAMING_SNAKE_CASE)
for output in model_outputs:
for k, v in output.items():
extra[k].append(__SCREAMING_SNAKE_CASE)
__a = {}
if output_rle_mask:
__a = rle_mask
if output_bboxes_mask:
__a = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 49
| 0
|
SCREAMING_SNAKE_CASE :List[str] = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
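# Sketch of the placeholder idiom the except-branches above rely on: each dummy
# module exposes same-named stand-ins that fail loudly on use (simplified; the
# real dummies are auto-generated and route through requires_backends):
# class LMSDiscreteScheduler:
#     def __init__(self, *args, **kwargs):
#         raise ImportError("LMSDiscreteScheduler requires the scipy backend.")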
| 15
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__snake_case :str = logging.get_logger(__name__)
__snake_case :int = {'''vocab_file''': '''vocab.txt'''}
__snake_case :List[Any] = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
__snake_case :List[str] = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
__snake_case :Optional[int] = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Keep the backend normalizer in sync with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # [CLS] A [SEP] for single sequences, [CLS] A [SEP] B [SEP] for pairs.
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # Segment ids: 0 for the first sequence, 1 for the second.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
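
# A minimal usage sketch for the fast tokenizer above; it assumes network
# access to the YituTech/conv-bert-base checkpoint and is illustrative only,
# not part of the original module.
#
#   tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   encoded = tokenizer("Hello world", "Second sentence")
#   print(encoded["input_ids"])       # ids with [CLS]/[SEP] added by the fast backend
#   print(encoded["token_type_ids"])  # 0s for the first segment, 1s for the second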
| 49
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        if crop_pct is not None:
            # Resize to a larger intermediate size so the later center crop
            # keeps crop_pct of the image.
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
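
# Hedged usage sketch for the processor above; the random input image is a
# local illustration, not part of the original module.
if __name__ == "__main__":
    processor = PoolFormerImageProcessor(size={"shortest_edge": 224}, crop_pct=0.9)
    dummy = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)
    batch = processor(images=dummy, return_tensors="np")
    # Resize to int(224 / 0.9) = 248 on the shortest edge, then center crop:
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)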
| 16
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    # An image of two cats from the COCO validation set, used for verification.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    # Map TF block names ("1a", "2b", ...) onto consecutive HF block indices.
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
__a = block_name_mapping[b]
rename_keys.append((f'block{b}_expand_conv/kernel:0', f'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
rename_keys.append((f'block{b}_expand_bn/gamma:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
rename_keys.append((f'block{b}_expand_bn/beta:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
rename_keys.append(
(f'block{b}_expand_bn/moving_mean:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
rename_keys.append(
(f'block{b}_expand_bn/moving_variance:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
rename_keys.append(
(f'block{b}_dwconv/depthwise_kernel:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
rename_keys.append((f'block{b}_bn/gamma:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
rename_keys.append((f'block{b}_bn/beta:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
rename_keys.append(
(f'block{b}_bn/moving_mean:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
rename_keys.append(
(f'block{b}_bn/moving_variance:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
rename_keys.append((f'block{b}_se_reduce/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
rename_keys.append((f'block{b}_se_reduce/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
rename_keys.append((f'block{b}_se_expand/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
rename_keys.append((f'block{b}_se_expand/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
rename_keys.append(
(f'block{b}_project_conv/kernel:0', f'encoder.blocks.{hf_b}.projection.project_conv.weight') )
rename_keys.append((f'block{b}_project_bn/gamma:0', f'encoder.blocks.{hf_b}.projection.project_bn.weight') )
rename_keys.append((f'block{b}_project_bn/beta:0', f'encoder.blocks.{hf_b}.projection.project_bn.bias') )
rename_keys.append(
(f'block{b}_project_bn/moving_mean:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
rename_keys.append(
(f'block{b}_project_bn/moving_variance:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
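
# Example invocation (script filename and paths are hypothetical, shown only
# for illustration):
#   python convert_efficientnet_to_pytorch.py --model_name b1 \
#       --pytorch_dump_folder_path hf_model --save_model
# After conversion, the checkpoint can be reloaded with from_pretrained:
#   model = EfficientNetForImageClassification.from_pretrained("hf_model")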
| 49
| 0
|
"""simple docstring"""
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """
    Creates a state space tree to iterate through each branch using DFS.
    It terminates when it reaches the end of the given sequence.
    """
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
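
# For comparison (an illustrative addition, not part of the original script):
# itertools.permutations enumerates the same n! orderings lazily instead of
# printing them from a recursive state space tree.
from itertools import permutations

print(list(permutations(["A", "B", "C"])))
# [('A', 'B', 'C'), ('A', 'C', 'B'), ('B', 'A', 'C'),
#  ('B', 'C', 'A'), ('C', 'A', 'B'), ('C', 'B', 'A')]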
| 17
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
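
# Illustrative behaviour (hypothetical counts): when run from the repository
# root, each offending category is printed, and the process exit code equals
# the total number of offending files, so CI can fail on any nonzero count.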
| 49
| 0
|
def binary_recursive(decimal: int) -> str:
    """Take a non-negative integer and return its binary representation."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input string, then delegate to binary_recursive()."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
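
# A few illustrative checks (added for clarity; values verified by hand):
#   main("35")  -> "0b100011"
#   main("-20") -> "-0b10100"
#   main("0")   -> "0b0"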
| 18
|
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count = defaultdict(int)

    # For each character in the input strings, increment the count for the
    # first string and decrement it for the second.
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
print(f'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
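
# Example behaviour (illustrative):
#   check_anagrams("Silent", "Listen")                    -> True
#   check_anagrams("This is a string", "Is this a string") -> True
#   check_anagrams("There", "Their")                       -> False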
| 49
| 0
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call." )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
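
# Hedged usage sketch (assumes the facebook/s2t-small-librispeech-asr
# checkpoint and a 16 kHz waveform; illustrative, not part of the module):
#
#   processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   inputs = processor(audio=waveform, sampling_rate=16_000, text=transcript, return_tensors="pt")
#   # inputs["input_features"] feeds the model; inputs["labels"] holds the tokenized text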
| 19
|
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
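
# Minimal usage sketch (illustrative): instantiating the config with its
# pruning-specific options.
if __name__ == "__main__":
    config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
    print(config.model_type, config.hidden_size)  # masked_bert 768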
| 49
| 0
|