| code (string, lengths 82–53.2k) | code_codestyle (int64, 0–721) | style_context (string, lengths 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
"""Pure Python implementation of the circle sort algorithm."""


def circle_sort(collection: list) -> list:
    """Sort `collection` in place with circle sort and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        """Compare/swap elements pairwise from the outside in, then recurse on both halves."""
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
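# Added, illustrative only (not part of the original module): doctest-style
# checks of circle_sort, assuming it behaves like any comparison sort.
# >>> circle_sort([0, 5, 3, 2, 2])
# [0, 2, 2, 3, 5]
# >>> circle_sort([])
# []
# >>> circle_sort([-2, -5, -45])
# [-45, -5, -2]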
"""NAND gate: outputs 0 only when both inputs are 1."""


def nand_gate(input_1: int, input_2: int) -> int:
    """Return the output of a NAND gate for the two given binary inputs."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    """Exercise the full truth table of the NAND gate."""
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
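# Added illustration (not part of the original module): NAND is functionally
# complete, so other gates can be derived from it. For example, NOT(x) is
# NAND(x, x); `not_gate_from_nand` below is a hypothetical helper.
def not_gate_from_nand(input_1: int) -> int:
    """Build a NOT gate from a single NAND gate: NOT(x) == NAND(x, x)."""
    return nand_gate(input_1, input_1)


assert not_gate_from_nand(0) == 1
assert not_gate_from_nand(1) == 0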
"""simple docstring"""
import argparse
import os
import re
__lowercase = '''src/transformers'''
# Pattern that looks at the indentation in a line.
__lowercase = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
__lowercase = re.compile(r'''^\s*\"([^\"]+)\":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__lowercase = re.compile(r'''^\s*_import_structure\[\"([^\"]+)\"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
__lowercase = re.compile(r'''^\s*\"([^\"]+)\",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__lowercase = re.compile(r'''\[([^\]]+)\]''')
def lowerCAmelCase (__UpperCamelCase : int ):
"""simple docstring"""
__UpperCamelCase =_re_indent.search(__UpperCamelCase )
return "" if search is None else search.groups()[0]
def lowerCAmelCase (__UpperCamelCase : List[str] , __UpperCamelCase : Dict="" , __UpperCamelCase : Dict=None , __UpperCamelCase : Dict=None ):
"""simple docstring"""
__UpperCamelCase =0
__UpperCamelCase =code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(__UpperCamelCase ):
index += 1
__UpperCamelCase =['''\n'''.join(lines[:index] )]
else:
__UpperCamelCase =[]
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__UpperCamelCase =[lines[index]]
index += 1
while index < len(__UpperCamelCase ) and (end_prompt is None or not lines[index].startswith(__UpperCamelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__UpperCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(__UpperCamelCase ) )
if index < len(__UpperCamelCase ) - 1:
__UpperCamelCase =[lines[index + 1]]
index += 1
else:
__UpperCamelCase =[]
else:
blocks.append('''\n'''.join(__UpperCamelCase ) )
__UpperCamelCase =[lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__UpperCamelCase ) > 0:
blocks.append('''\n'''.join(__UpperCamelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__UpperCamelCase ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def lowerCAmelCase (__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
def _inner(__UpperCamelCase : List[Any] ):
return key(__UpperCamelCase ).lower().replace('''_''' , '''''' )
return _inner
def lowerCAmelCase (__UpperCamelCase : Tuple , __UpperCamelCase : Optional[int]=None ):
"""simple docstring"""
def noop(__UpperCamelCase : Optional[Any] ):
return x
if key is None:
__UpperCamelCase =noop
# Constants are all uppercase, they go first.
__UpperCamelCase =[obj for obj in objects if key(__UpperCamelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
__UpperCamelCase =[obj for obj in objects if key(__UpperCamelCase )[0].isupper() and not key(__UpperCamelCase ).isupper()]
# Functions begin with a lowercase, they go last.
__UpperCamelCase =[obj for obj in objects if not key(__UpperCamelCase )[0].isupper()]
__UpperCamelCase =ignore_underscore(__UpperCamelCase )
return sorted(__UpperCamelCase , key=__UpperCamelCase ) + sorted(__UpperCamelCase , key=__UpperCamelCase ) + sorted(__UpperCamelCase , key=__UpperCamelCase )
def lowerCAmelCase (__UpperCamelCase : Tuple ):
"""simple docstring"""
def _replace(__UpperCamelCase : int ):
__UpperCamelCase =match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
__UpperCamelCase =[part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__UpperCamelCase =keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(__UpperCamelCase )] ) + "]"
__UpperCamelCase =import_statement.split('''\n''' )
if len(__UpperCamelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__UpperCamelCase =2 if lines[1].strip() == '''[''' else 1
__UpperCamelCase =[(i, _re_strip_line.search(__UpperCamelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
__UpperCamelCase =sort_objects(__UpperCamelCase , key=lambda __UpperCamelCase : x[1] )
__UpperCamelCase =[lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(__UpperCamelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
__UpperCamelCase =_re_bracket_content.sub(_replace , lines[1] )
else:
__UpperCamelCase =[part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__UpperCamelCase =keys[:-1]
__UpperCamelCase =get_indent(lines[1] ) + ''', '''.join([F"""\"{k}\"""" for k in sort_objects(__UpperCamelCase )] )
return "\n".join(__UpperCamelCase )
else:
# Finally we have to deal with imports fitting on one line
__UpperCamelCase =_re_bracket_content.sub(_replace , __UpperCamelCase )
return import_statement
def lowerCAmelCase (__UpperCamelCase : List[Any] , __UpperCamelCase : Tuple=True ):
"""simple docstring"""
with open(__UpperCamelCase , encoding='''utf-8''' ) as f:
__UpperCamelCase =f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
__UpperCamelCase =split_code_in_indented_blocks(
__UpperCamelCase , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(__UpperCamelCase ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
__UpperCamelCase =main_blocks[block_idx]
__UpperCamelCase =block.split('''\n''' )
# Get to the start of the imports.
__UpperCamelCase =0
while line_idx < len(__UpperCamelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
__UpperCamelCase =len(__UpperCamelCase )
else:
line_idx += 1
if line_idx >= len(__UpperCamelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
__UpperCamelCase ='''\n'''.join(block_lines[line_idx:-1] )
__UpperCamelCase =get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
__UpperCamelCase =split_code_in_indented_blocks(__UpperCamelCase , indent_level=__UpperCamelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
__UpperCamelCase =_re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
__UpperCamelCase =[(pattern.search(__UpperCamelCase ).groups()[0] if pattern.search(__UpperCamelCase ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
__UpperCamelCase =[(i, key) for i, key in enumerate(__UpperCamelCase ) if key is not None]
__UpperCamelCase =[x[0] for x in sorted(__UpperCamelCase , key=lambda __UpperCamelCase : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
__UpperCamelCase =0
__UpperCamelCase =[]
for i in range(len(__UpperCamelCase ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
__UpperCamelCase =sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(__UpperCamelCase )
count += 1
# And we put our main block back together with its first and last line.
__UpperCamelCase ='''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(__UpperCamelCase ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(__UpperCamelCase ) )
def lowerCAmelCase (__UpperCamelCase : Union[str, Any]=True ):
"""simple docstring"""
__UpperCamelCase =[]
for root, _, files in os.walk(__UpperCamelCase ):
if "__init__.py" in files:
__UpperCamelCase =sort_imports(os.path.join(__UpperCamelCase , '''__init__.py''' ) , check_only=__UpperCamelCase )
if result:
__UpperCamelCase =[os.path.join(__UpperCamelCase , '''__init__.py''' )]
if len(__UpperCamelCase ) > 0:
raise ValueError(F"""Would overwrite {len(__UpperCamelCase )} files, run `make style`.""" )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
__lowercase = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
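# Added example (not part of the original script): what `sort_objects_in_import`
# does to a hypothetical one-line `_import_structure` entry. Constants sort
# first, then classes, then functions, with underscores ignored in comparisons.
# >>> sort_objects_in_import('_import_structure["models.albert"] = ["zeta", "ALPHA", "Beta"]')
# '_import_structure["models.albert"] = ["ALPHA", "Beta", "zeta"]'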
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _lowercase ( __a ):
"""simple docstring"""
lowercase__ = '''convbert'''
def __init__( self : Optional[Any] , UpperCamelCase__ : int=30522 , UpperCamelCase__ : int=768 , UpperCamelCase__ : str=12 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : Optional[Any]=3072 , UpperCamelCase__ : Optional[int]="gelu" , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : Dict=1E-12 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : List[Any]=768 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : List[str]=9 , UpperCamelCase__ : str=1 , UpperCamelCase__ : str=None , **UpperCamelCase__ : str , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =type_vocab_size
__UpperCamelCase =initializer_range
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =embedding_size
__UpperCamelCase =head_ratio
__UpperCamelCase =conv_kernel_size
__UpperCamelCase =num_groups
__UpperCamelCase =classifier_dropout
class _lowercase ( __a ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCamelCase ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
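# Added usage sketch (not part of the original module), following the standard
# transformers configuration API:
# >>> from transformers import ConvBertConfig, ConvBertModel
# >>> configuration = ConvBertConfig()        # defaults match YituTech/conv-bert-base
# >>> model = ConvBertModel(configuration)    # randomly initialized model
# >>> configuration = model.config            # read the configuration back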
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch

import multiprocess
import numpy as np
import pytest

from datasets.utils.py_utils import (
    NestedDataStructure,
    asdict,
    iflatmap_unordered,
    map_nested,
    temp_seed,
    temporary_assignment,
    zip_dict,
)

from .utils import require_tf, require_torch


def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    x: int
    y: str


class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn1 = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn1_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn1_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False, num_proc=num_proc), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn1, num_proc=num_proc)

    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")


@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc",
    [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ],
)
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc


class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_temp_seed_pytorch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)


@pytest.mark.parametrize("input_data", [{}])
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data


@pytest.mark.parametrize(
    "data, expected_output",
    [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ],
)
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output


def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])


def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
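# Added illustration (not part of the original test file): `map_nested` applies
# a function through arbitrarily nested lists and dicts, which is what the
# tests above exercise.
# >>> from datasets.utils.py_utils import map_nested
# >>> map_nested(lambda x: x * 2, {"a": [1, 2], "b": 3})
# {'a': [2, 4], 'b': 6}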
def is_palindrome(num: int) -> bool:
    """
    Return True if `num` reads the same backwards (a palindromic number).

    >>> is_palindrome(12321)
    True
    >>> is_palindrome(1234)
    False
    >>> is_palindrome(-121)
    False
    """
    if num < 0:
        return False

    num_copy: int = num
    rev_num: int = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class _UpperCAmelCase ( _A ):
def __init__( self : Optional[int] , A : Optional[Any]=0.01 , A : int=10_00 ) -> Optional[int]:
lowercase_ : Dict = p_stop
lowercase_ : Optional[Any] = max_length
def __iter__( self : Dict ) -> Dict:
lowercase_ : str = 0
lowercase_ : Optional[int] = False
while not stop and count < self.max_length:
yield count
count += 1
lowercase_ : List[str] = random.random() < self.p_stop
class _UpperCAmelCase ( unittest.TestCase ):
def A ( self : List[Any] , A : Any , A : Union[str, Any] , A : Optional[Any]=False , A : Dict=True ) -> str:
lowercase_ : Tuple = [
BatchSamplerShard(A , 2 , A , split_batches=A , even_batches=A )
for i in range(2 )
]
lowercase_ : Optional[Any] = [list(A ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(A ) for shard in batch_sampler_shards] , [len(A ) for e in expected] )
self.assertListEqual(A , A )
def A ( self : Dict ) -> Tuple:
# Check the shards when the dataset is a round multiple of total batch size.
lowercase_ : Union[str, Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
lowercase_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(A , A )
lowercase_ : Tuple = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowercase_ : Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
lowercase_ : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(A , A )
lowercase_ : str = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
lowercase_ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
lowercase_ : Tuple = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
lowercase_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(A , A )
lowercase_ : Union[str, Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
lowercase_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
lowercase_ : Tuple = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
lowercase_ : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(A , A )
lowercase_ : Any = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
lowercase_ : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is very small.
lowercase_ : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
lowercase_ : Optional[Any] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(A , A )
lowercase_ : Any = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
lowercase_ : List[str] = [[], []]
self.check_batch_sampler_shards(A , A )
def A ( self : str ) -> str:
# Check the shards when the dataset is a round multiple of batch size.
lowercase_ : List[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
lowercase_ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
lowercase_ : Dict = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A , split_batches=A )
# Check the shards when the dataset is not a round multiple of batch size.
lowercase_ : List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
lowercase_ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
lowercase_ : Any = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
lowercase_ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowercase_ : Tuple = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
lowercase_ : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
lowercase_ : Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
lowercase_ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
# Check the shards when the dataset is very small.
lowercase_ : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
lowercase_ : Dict = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(A , A , split_batches=A )
lowercase_ : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
lowercase_ : str = [[], []]
self.check_batch_sampler_shards(A , A , split_batches=A )
def A ( self : str ) -> int:
# Check the shards when the dataset is a round multiple of total batch size.
lowercase_ : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
lowercase_ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : Dict = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowercase_ : List[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
lowercase_ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : List[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
lowercase_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
lowercase_ : List[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
lowercase_ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : Union[str, Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
lowercase_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
lowercase_ : List[str] = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
lowercase_ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : str = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
lowercase_ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is very small.
lowercase_ : List[str] = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
lowercase_ : Tuple = [[[0, 1]], []]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : List[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
lowercase_ : Optional[Any] = [[], []]
self.check_batch_sampler_shards(A , A , even_batches=A )
def A ( self : Optional[Any] ) -> Union[str, Any]:
# Check the shards when the dataset is a round multiple of batch size.
lowercase_ : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
lowercase_ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
lowercase_ : int = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size.
lowercase_ : List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
lowercase_ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
lowercase_ : List[str] = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
lowercase_ : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowercase_ : Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
lowercase_ : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
lowercase_ : Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
lowercase_ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
# Check the shards when the dataset is very small.
lowercase_ : Optional[int] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
lowercase_ : Union[str, Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
lowercase_ : List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
lowercase_ : Dict = [[], []]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
def A ( self : str ) -> str:
lowercase_ : str = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
lowercase_ : Tuple = [BatchSamplerShard(A , 2 , A , even_batches=A ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def A ( self : Union[str, Any] , A : Union[str, Any] , A : Tuple , A : Dict , A : str=False , A : Any=2 , A : Optional[int]=False ) -> Optional[Any]:
random.seed(A )
lowercase_ : Any = list(A )
lowercase_ : Optional[int] = [
IterableDatasetShard(
A , batch_size=A , drop_last=A , num_processes=A , process_index=A , split_batches=A , )
for i in range(A )
]
lowercase_ : Any = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(A )
iterable_dataset_lists.append(list(A ) )
lowercase_ : List[Any] = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
lowercase_ : Dict = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(A ) , len(A ) )
self.assertTrue(len(A ) % shard_batch_size == 0 )
lowercase_ : Optional[int] = []
for idx in range(0 , len(A ) , A ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(A ) < len(A ):
reference += reference
self.assertListEqual(A , reference[: len(A )] )
def A ( self : Optional[Any] ) -> List[str]:
lowercase_ : int = 42
lowercase_ : Tuple = RandomIterableDataset()
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
# Edge case with a very small dataset
lowercase_ : List[str] = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
def A ( self : Optional[Any] ) -> Tuple:
lowercase_ : List[str] = BatchSampler(range(16 ) , batch_size=4 , drop_last=A )
lowercase_ : int = SkipBatchSampler(A , 2 )
self.assertListEqual(list(A ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def A ( self : List[str] ) -> Union[str, Any]:
lowercase_ : int = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def A ( self : Dict ) -> int:
lowercase_ : Optional[Any] = DataLoader(list(range(16 ) ) , batch_size=4 )
lowercase_ : Union[str, Any] = skip_first_batches(A , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def A ( self : List[str] ) -> str:
lowercase_ : Any = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def A ( self : Optional[Any] ) -> Optional[int]:
Accelerator()
lowercase_ : Tuple = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
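# Added illustration (not part of the original test file): BatchSamplerShard
# round-robins whole batches across processes, matching the expectations used
# in the tests above.
# >>> from torch.utils.data import BatchSampler
# >>> from accelerate.data_loader import BatchSamplerShard
# >>> sampler = BatchSampler(range(6), batch_size=3, drop_last=False)
# >>> list(BatchSamplerShard(sampler, 2, 0))
# [[0, 1, 2]]
# >>> list(BatchSamplerShard(sampler, 2, 1))
# [[3, 4, 5]]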
import argparse

import requests
import torch
from PIL import Image

from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel


def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require
            # special treatment: we need to split them up into separate matrices/vectors
            # (the target key names below follow the rename scheme of `rename_key` above)
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require
            # special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict


def prepare_img():
    # We verify our conversion on an image of two cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    """Copy/paste/tweak the model's weights into the Transformers design."""
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()

    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
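# Added usage sketch (not part of the original script); the script file name
# and paths below are placeholders:
#
#   python convert_groupvit_to_hf.py \
#       --checkpoint_path /path/to/checkpoint.pth \
#       --pytorch_dump_folder_path /path/to/dump \
#       --model_name groupvit-gcc-yfcc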
import pickle
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)

    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
            ],
        )

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #        ^ unk: 2 + 1 = 3                                      unk: 2 + 1 = 3  ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
            ],
        )

    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase__ = {'input_ids': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
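# A decoding round-trip sketch (illustrative; `ids` stands for any of the expected
# id sequences checked above). Decoding should recover the normalized text, with
# the out-of-vocabulary words collapsed to "<unk>":
# decoded = self.big_tokenizer.decode(ids)
# assert "<unk>" in decoded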
| 146
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __UpperCamelCase (_UpperCAmelCase , unittest.TestCase ):
__A = KandinskyInpaintPipeline
__A = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
__A = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
__A = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
__A = False
@property
def _a ( self ) -> Tuple:
'''simple docstring'''
return 32
@property
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
return 32
@property
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.time_input_dim
@property
def _a ( self ) -> Any:
'''simple docstring'''
return self.time_input_dim * 4
@property
def _a ( self ) -> Dict:
'''simple docstring'''
return 100
@property
def _a ( self ) -> str:
'''simple docstring'''
lowercase = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def _a ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
lowercase = MultilingualCLIP(_lowerCAmelCase )
lowercase = text_encoder.eval()
return text_encoder
@property
def _a ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowercase = {
"""in_channels""": 9,
# out_channels is double in_channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowercase = UNetaDConditionModel(**_lowerCAmelCase )
return model
@property
def _a ( self ) -> Tuple:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase = VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = self.dummy_text_encoder
lowercase = self.dummy_tokenizer
lowercase = self.dummy_unet
lowercase = self.dummy_movq
lowercase = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_lowerCAmelCase , set_alpha_to_one=_lowerCAmelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=_lowerCAmelCase , )
lowercase = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ) -> Dict:
'''simple docstring'''
lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCAmelCase )
# create init_image
lowercase = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
lowercase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase = Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
lowercase = np.ones((64, 64) , dtype=np.floataa )
lowercase = 0
if str(_lowerCAmelCase ).startswith("""mps""" ):
lowercase = torch.manual_seed(_lowerCAmelCase )
else:
lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
lowercase = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = """cpu"""
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**_lowerCAmelCase )
lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
lowercase = output.images
lowercase = pipe(
**self.get_dummy_inputs(_lowerCAmelCase ) , return_dict=_lowerCAmelCase , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
lowercase = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase (unittest.TestCase ):
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
lowercase = np.ones((768, 768) , dtype=np.floataa )
lowercase = 0
lowercase = """a hat"""
lowercase = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCAmelCase )
lowercase = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
lowercase = pipeline.to(_lowerCAmelCase )
pipeline.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase , lowercase = pipe_prior(
_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
lowercase = pipeline(
_lowerCAmelCase , image=_lowerCAmelCase , mask_image=_lowerCAmelCase , image_embeds=_lowerCAmelCase , negative_image_embeds=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
lowercase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
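# Condensed view of the two-stage flow exercised above (checkpoint ids match the
# ones used in this test; images/embeddings are illustrative):
# image_emb, zero_image_emb = pipe_prior(prompt, generator=generator).to_tuple()
# image = pipeline(prompt, image=init_image, mask_image=mask,
#                  image_embeds=image_emb, negative_image_embeds=zero_image_emb,
#                  output_type="np").images[0]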
| 653
|
'''simple docstring'''
import requests
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : str ):
lowercase = {"""Content-Type""": """application/json"""}
lowercase = requests.post(lowercase_ , json={"""text""": message_body} , headers=lowercase_ )
if response.status_code != 200:
lowercase = (
"""Request to slack returned an error """
F"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(lowercase_ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
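# The HTTP request issued above is equivalent to (illustrative):
# curl -X POST -H 'Content-Type: application/json' \
#      --data '{"text": "<YOUR MESSAGE BODY>"}' '<SLACK CHANNEL URL>'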
| 653
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Any = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] = ['PoolFormerFeatureExtractor']
_lowercase : Any = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
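# Note: in the upstream lazy-module pattern the _LazyModule instance is assigned
# to sys.modules[__name__], so the heavy torch/vision imports above are deferred
# until first attribute access, e.g. (illustrative):
# from transformers.models.poolformer import PoolFormerConfig  # resolved lazily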
| 49
|
"""simple docstring"""
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
__SCREAMING_SNAKE_CASE = {
'n_samples': 64,
'horizon': 32,
'num_inference_steps': 20,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = 'hopper-medium-v2'
__SCREAMING_SNAKE_CASE = gym.make(env_name)
__SCREAMING_SNAKE_CASE = ValueGuidedRLPipeline.from_pretrained(
'bglick13/hopper-medium-v2-value-function-hor32',
env=env,
)
env.seed(0)
__SCREAMING_SNAKE_CASE = env.reset()
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 10_00
__SCREAMING_SNAKE_CASE = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
__SCREAMING_SNAKE_CASE = pipeline(obs, planning_horizon=32)
# execute action in environment
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE = env.step(denorm_actions)
__SCREAMING_SNAKE_CASE = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"""
F""" {total_score}"""
)
# save observations for rendering
rollout.append(next_observation.copy())
__SCREAMING_SNAKE_CASE = next_observation
except KeyboardInterrupt:
pass
print(F"""Total reward: {total_reward}""")
| 553
| 0
|
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class _lowerCamelCase :
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=1_6 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[1, 3_8_4, 2_4, 2_4] , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = parent
UpperCamelCase__ : Any = batch_size
UpperCamelCase__ : Optional[int] = image_size
UpperCamelCase__ : int = patch_size
UpperCamelCase__ : str = num_channels
UpperCamelCase__ : List[str] = is_training
UpperCamelCase__ : Union[str, Any] = use_labels
UpperCamelCase__ : Tuple = hidden_size
UpperCamelCase__ : str = num_hidden_layers
UpperCamelCase__ : int = backbone_out_indices
UpperCamelCase__ : Optional[int] = num_attention_heads
UpperCamelCase__ : str = intermediate_size
UpperCamelCase__ : Dict = hidden_act
UpperCamelCase__ : int = hidden_dropout_prob
UpperCamelCase__ : Optional[Any] = attention_probs_dropout_prob
UpperCamelCase__ : List[str] = initializer_range
UpperCamelCase__ : Union[str, Any] = num_labels
UpperCamelCase__ : Union[str, Any] = backbone_featmap_shape
UpperCamelCase__ : Any = scope
UpperCamelCase__ : List[Any] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase__ : str = (image_size // patch_size) ** 2
UpperCamelCase__ : Union[str, Any] = num_patches + 1
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ : int = None
if self.use_labels:
UpperCamelCase__ : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase__ : Any = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ : int = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [9_6, 1_9_2, 3_8_4, 7_6_8],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__SCREAMING_SNAKE_CASE , backbone_featmap_shape=self.backbone_featmap_shape , )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Dict = DPTModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ : Any = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
UpperCamelCase__ : List[Any] = self.num_labels
UpperCamelCase__ : Union[str, Any] = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ : Any = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = self.num_labels
UpperCamelCase__ : Optional[Any] = DPTForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ : List[Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCamelCase__ : List[Any] = config_and_inputs
UpperCamelCase__ : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : int = DPTModelTester(self )
UpperCamelCase__ : List[str] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Dict = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase__ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[Any] = model_class(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Dict = [*signature.parameters.keys()]
UpperCamelCase__ : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : Union[str, Any] = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ):
continue
UpperCamelCase__ : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
UpperCamelCase__ : Optional[int] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE ( self ) -> int:
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : Optional[int] = False
UpperCamelCase__ : List[str] = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
UpperCamelCase__ : int = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
UpperCamelCase__ : Optional[Any] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : Tuple = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
UpperCamelCase__ : Dict = model_class(config=__SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
UpperCamelCase__ : Any = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
UpperCamelCase__ : str = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
UpperCamelCase__ : Dict = DPTModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : Any = '''add'''
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : List[Any] = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( ):
UpperCamelCase__ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : str = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
UpperCamelCase__ : Optional[int] = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = prepare_img()
UpperCamelCase__ : int = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCamelCase__ : Optional[Any] = model(**__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = outputs.predicted_depth
# verify the predicted depth
UpperCamelCase__ : List[Any] = torch.Size((1, 3_8_4, 3_8_4) )
self.assertEqual(predicted_depth.shape , __SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
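# Typical post-processing for the depth map verified above (illustrative):
# resize the 384x384 prediction back to the input image resolution.
# prediction = torch.nn.functional.interpolate(
#     predicted_depth.unsqueeze(1), size=image.size[::-1], mode="bicubic", align_corners=False
# )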
| 704
|
import torch
from transformers import AutoModel
class _lowerCamelCase ( torch.nn.Module ):
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE="sayef/fsner-bert-base-uncased" ) -> Optional[Any]:
"""simple docstring"""
super(__SCREAMING_SNAKE_CASE , self ).__init__()
UpperCamelCase__ : Tuple = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = torch.nn.CosineSimilarity(3 , 1e-08 )
UpperCamelCase__ : List[Any] = torch.nn.Softmax(dim=1 )
def __SCREAMING_SNAKE_CASE ( self , **__SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1 ) -> str:
"""simple docstring"""
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = W_supports['''sizes'''].tolist()
UpperCamelCase__ : Tuple = W_supports['''start_token_id'''].item()
UpperCamelCase__ : int = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
UpperCamelCase__ : Optional[int] = self.BERT(**__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = self.BERT(**__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = None
UpperCamelCase__ : Dict = None
UpperCamelCase__ : Any = W_supports['''input_ids'''] == start_token_id
UpperCamelCase__ : Optional[int] = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
UpperCamelCase__ : int = 0
else:
UpperCamelCase__ : Optional[int] = support_sizes[i - 1]
UpperCamelCase__ : List[Any] = S[s : s + size][start_token_masks[s : s + size]]
UpperCamelCase__ : Optional[Any] = S[s : s + size][end_token_masks[s : s + size]]
UpperCamelCase__ : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
UpperCamelCase__ : Optional[Any] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
UpperCamelCase__ : Union[str, Any] = torch.vstack((p_starts, p_start) )
UpperCamelCase__ : str = torch.vstack((p_ends, p_end) )
else:
UpperCamelCase__ : List[Any] = p_start
UpperCamelCase__ : List[str] = p_end
return p_starts, p_ends
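# Hypothetical usage sketch: W_query and W_supports are tokenizer outputs, with
# W_supports additionally carrying the "sizes", "start_token_id" and
# "end_token_id" entries consumed in forward() above.
# model = _lowerCamelCase()
# p_starts, p_ends = model(W_query, W_supports)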
| 462
| 0
|
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
UpperCamelCase : int = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class A__ :
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[Any]=16 , lowerCamelCase__ : Dict=13 , lowerCamelCase__ : Dict=7 , lowerCamelCase__ : List[Any]=14 , lowerCamelCase__ : Optional[int]=10 , lowerCamelCase__ : int=19 , lowerCamelCase__ : List[Any]=5 , lowerCamelCase__ : int=4 , lowerCamelCase__ : str=True , lowerCamelCase__ : Any=16 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : int=4 , lowerCamelCase__ : Tuple=4 , lowerCamelCase__ : Dict="gelu" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : Any=[1, 2, 3, 4, 5] , lowerCamelCase__ : List[Any]=25 , lowerCamelCase__ : List[Any]=5 , ):
a__ : Optional[Any] = d_model
a__ : List[str] = parent
a__ : str = batch_size
a__ : List[Any] = prediction_length
a__ : List[Any] = context_length
a__ : str = cardinality
a__ : List[Any] = num_time_features
a__ : Dict = lags_sequence
a__ : int = embedding_dimension
a__ : Dict = is_training
a__ : Dict = hidden_size
a__ : int = num_hidden_layers
a__ : str = num_attention_heads
a__ : Dict = intermediate_size
a__ : List[str] = hidden_act
a__ : str = hidden_dropout_prob
a__ : Optional[Any] = attention_probs_dropout_prob
a__ : Optional[int] = context_length
a__ : Any = prediction_length + label_length
a__ : List[Any] = label_length
a__ : int = moving_average
a__ : Any = autocorrelation_factor
def _UpperCamelCase( self : Dict ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def _UpperCamelCase( self : int , lowerCamelCase__ : List[Any] ):
a__ : str = config.context_length + max(config.lags_sequence )
a__ : Optional[int] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
a__ : Optional[Any] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
a__ : Optional[int] = floats_tensor([self.batch_size, _past_length] )
a__ : Any = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
a__ : Optional[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
a__ : Union[str, Any] = floats_tensor([self.batch_size, config.prediction_length] )
a__ : str = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def _UpperCamelCase( self : List[Any] ):
a__ : Dict = self.get_config()
a__ : Any = self.prepare_autoformer_inputs_dict(lowerCamelCase__ )
return config, inputs_dict
def _UpperCamelCase( self : str ):
a__, a__ : Tuple = self.prepare_config_and_inputs()
return config, inputs_dict
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[Any] ):
a__ : Union[str, Any] = AutoformerModel(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
a__ : List[Any] = model(**lowerCamelCase__ )
a__ : int = outputs.encoder_last_hidden_state
a__ : Optional[Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : str = model.get_encoder()
encoder.save_pretrained(lowerCamelCase__ )
a__ : List[str] = AutoformerEncoder.from_pretrained(lowerCamelCase__ ).to(lowerCamelCase__ )
a__, a__, a__, a__, a__ : List[str] = model.create_network_inputs(**lowerCamelCase__ )
a__, a__ : Union[str, Any] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
a__ : str = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
a__ : Optional[int] = encoder(inputs_embeds=lowerCamelCase__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
a__ : Tuple = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
a__ : Tuple = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
a__ : Dict = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
a__ : Union[str, Any] = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Any = model.get_decoder()
decoder.save_pretrained(lowerCamelCase__ )
a__ : Dict = AutoformerDecoder.from_pretrained(lowerCamelCase__ ).to(lowerCamelCase__ )
a__ : Optional[int] = decoder(
trend=lowerCamelCase__ , inputs_embeds=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class A__ ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
_lowercase = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_lowercase = (AutoformerForPrediction,) if is_torch_available() else ()
_lowercase = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
_lowercase = False
_lowercase = False
_lowercase = False
_lowercase = False
_lowercase = False
_lowercase = False
def _UpperCamelCase( self : Optional[Any] ):
a__ : List[Any] = AutoformerModelTester(self )
a__ : List[Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
self.config_tester.run_common_tests()
def _UpperCamelCase( self : List[Any] ):
a__, a__ : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
a__ : Any = model_class(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ )
a__, a__ : List[str] = model_class.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertEqual(info["missing_keys"] , [] )
def _UpperCamelCase( self : Union[str, Any] ):
a__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*lowerCamelCase__ )
@unittest.skip(reason="Model has no tokens embeddings" )
def _UpperCamelCase( self : Optional[Any] ):
pass
def _UpperCamelCase( self : Any ):
a__ : str = inspect.signature(getattr(lowerCamelCase__ , "forward" ) )
# The main input is the name of the argument after `self`
a__ : int = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , lowerCamelCase__ )
def _UpperCamelCase( self : int ):
a__, a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : List[Any] = model_class(lowerCamelCase__ )
a__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : Tuple = [*signature.parameters.keys()]
a__ : Any = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(lowerCamelCase__ )] , lowerCamelCase__ )
def _UpperCamelCase( self : List[Any] ):
a__, a__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
a__ : Any = True
a__ : int = getattr(self.model_tester , "seq_length" , lowerCamelCase__ )
a__ : Optional[Any] = getattr(self.model_tester , "decoder_seq_length" , lowerCamelCase__ )
a__ : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , lowerCamelCase__ )
a__ : Optional[int] = getattr(self.model_tester , "d_model" , lowerCamelCase__ )
a__ : Tuple = getattr(self.model_tester , "num_attention_heads" , lowerCamelCase__ )
a__ : Union[str, Any] = d_model // num_attention_heads
for model_class in self.all_model_classes:
a__ : Tuple = True
a__ : List[Any] = False
a__ : Optional[Any] = True
a__ : Optional[Any] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
a__ : str = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
a__ : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a__ : List[str] = True
a__ : Any = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
a__ : str = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
a__ : str = outputs.encoder_attentions
self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
a__ : int = len(lowerCamelCase__ )
a__ : Tuple = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
# decoder attentions
a__ : List[Any] = outputs.decoder_attentions
self.assertIsInstance(lowerCamelCase__ , (list, tuple) )
self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
a__ : Optional[int] = outputs.cross_attentions
self.assertIsInstance(lowerCamelCase__ , (list, tuple) )
self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
a__ : int = True
a__ : List[str] = True
a__ : Union[str, Any] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
a__ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(out_len + 2 , len(lowerCamelCase__ ) )
a__ : str = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def _UpperCamelCase( self : Any ):
super().test_retain_grad_hidden_states_attentions()
def UpperCamelCase_ ( __a="train-batch.pt" ) -> Optional[Any]:
a__ : Union[str, Any] = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=__a , repo_type="dataset" )
a__ : Tuple = torch.load(__a , map_location=__a )
return batch
@require_torch
@slow
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase( self : Tuple ):
a__ : Optional[Any] = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCamelCase__ )
a__ : Optional[Any] = prepare_batch()
with torch.no_grad():
a__ : int = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
a__ : Union[str, Any] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , lowerCamelCase__ )
a__ : List[str] = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=lowerCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def _UpperCamelCase( self : Dict ):
a__ : Optional[Any] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCamelCase__ )
a__ : int = prepare_batch("val-batch.pt" )
with torch.no_grad():
a__ : List[Any] = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
a__ : Any = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , lowerCamelCase__ )
a__ : Tuple = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=lowerCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def _UpperCamelCase( self : Dict ):
a__ : Optional[Any] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCamelCase__ )
a__ : int = prepare_batch("val-batch.pt" )
with torch.no_grad():
a__ : List[str] = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
a__ : List[str] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , lowerCamelCase__ )
a__ : Tuple = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=lowerCamelCase__ )
a__ : Tuple = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , lowerCamelCase__ , rtol=1E-1 ) )
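# Deriving point forecasts from the sampled trajectories checked above
# (illustrative): aggregate over the num_parallel_samples dimension.
# mean_forecast = outputs.sequences.mean(dim=1)    # (batch, prediction_length)
# p90_forecast = outputs.sequences.quantile(0.9, dim=1)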
| 37
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def UpperCamelCase_ ( ) -> int:
a__ : Any = HfArgumentParser(__a )
a__ : Any = parser.parse_args_into_dataclasses()[0]
a__ : Optional[int] = TensorFlowBenchmark(args=__a )
try:
a__ : Optional[int] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
a__ : Tuple = "Arg --no_{0} is no longer used, please use --no-{0} instead."
a__ : List[Any] = " ".join(str(__a ).split(" " )[:-1] )
a__ : str = ""
a__ : List[Any] = eval(str(__a ).split(" " )[-1] )
a__ : List[str] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(__a )
if len(__a ) > 0:
a__ : Tuple = full_error_msg + begin_error_msg + str(__a )
raise ValueError(__a )
benchmark.run()
if __name__ == "__main__":
main()
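# Example invocation (flag names come from TensorFlowBenchmarkArguments; values
# and the script name are illustrative):
# python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128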
| 37
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)
A = torch.device('cpu')
def lowerCamelCase ( ) -> Optional[int]:
_lowerCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowerCamelCase = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw )
return im
def lowerCamelCase ( UpperCamelCase : Optional[Any] ) -> Tuple:
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0, 8.8_6_8_5e-0_1, 2.4_3_6_0e-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6e-0_1, 2.3_4_7_8e-0_1, -1.6_9_6_3e0_0, -1.7_3_8_1e0_0, -8.6_3_3_7e-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8e-0_1, -4.7_4_2_9e-0_1, -1.0_8_9_7e0_0, -1.0_2_4_8e0_0, 3.5_5_2_3e-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0e-0_1, 2.4_2_1_1e-0_1, -6.0_1_8_5e-0_1, -8.2_7_8_9e-0_1, -6.0_4_4_6e-0_2] )
def lowerCamelCase ( UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : int ) -> Dict:
_lowerCamelCase = dct.pop(__SCREAMING_SNAKE_CASE )
_lowerCamelCase = val
def lowerCamelCase ( UpperCamelCase : int ) -> Tuple:
_lowerCamelCase = []
for k in state_dict.keys():
_lowerCamelCase = k
if ".pwconv" in k:
_lowerCamelCase = k_new.replace('.pwconv' , '.point_wise_conv' )
if ".dwconv" in k:
_lowerCamelCase = k_new.replace('.dwconv' , '.depth_wise_conv' )
if ".Proj." in k:
_lowerCamelCase = k_new.replace('.Proj.' , '.proj.' )
if "patch_embed" in k_new:
_lowerCamelCase = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
if "network" in k_new:
_lowerCamelCase = k_new.split('.' )
if ls[2].isdigit():
_lowerCamelCase = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
else:
_lowerCamelCase = k_new.replace('network' , 'swiftformer.encoder.network' )
rename_keys.append((k, k_new) )
return rename_keys
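# Illustrative mapping produced by the rules above (derived by hand from the
# rename rules, not read from a checkpoint): a depthwise-conv weight in the
# first stage is renamed as
# ("network.0.0.dwconv.weight",
#  "swiftformer.encoder.network.0.blocks.0.depth_wise_conv.weight")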
@torch.no_grad()
def lowerCamelCase ( UpperCamelCase : Any , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict ) -> List[str]:
_lowerCamelCase = SwiftFormerConfig()
# label mapping: the checkpoints are classifiers fine-tuned on ImageNet-1k (1000 classes)
_lowerCamelCase = 10_00
_lowerCamelCase = 'huggingface/label-files'
_lowerCamelCase = 'imagenet-1k-id2label.json'
_lowerCamelCase = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
_lowerCamelCase = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
_lowerCamelCase = [3, 3, 6, 4]
_lowerCamelCase = [48, 56, 1_12, 2_20]
elif swiftformer_name == "swiftformer_s":
_lowerCamelCase = [3, 3, 9, 6]
_lowerCamelCase = [48, 64, 1_68, 2_24]
elif swiftformer_name == "swiftformer_l1":
_lowerCamelCase = [4, 3, 10, 5]
_lowerCamelCase = [48, 96, 1_92, 3_84]
elif swiftformer_name == "swiftformer_l3":
_lowerCamelCase = [4, 4, 12, 6]
_lowerCamelCase = [64, 1_28, 3_20, 5_12]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
_lowerCamelCase = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' , check_hash=__SCREAMING_SNAKE_CASE )
else:
_lowerCamelCase = torch.load(__SCREAMING_SNAKE_CASE , map_location='cpu' )
_lowerCamelCase = checkpoint
_lowerCamelCase = create_rename_keys(__SCREAMING_SNAKE_CASE )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# load HuggingFace model
_lowerCamelCase = SwiftFormerForImageClassification(__SCREAMING_SNAKE_CASE ).eval()
hf_model.load_state_dict(__SCREAMING_SNAKE_CASE )
# prepare test inputs
_lowerCamelCase = prepare_img()
_lowerCamelCase = ViTImageProcessor.from_pretrained('preprocessor_config' )
_lowerCamelCase = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' )
# compare outputs from both models
_lowerCamelCase = get_expected_output(__SCREAMING_SNAKE_CASE )
_lowerCamelCase = hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 10_00] )
assert torch.allclose(hf_logits[0, 0:5] , __SCREAMING_SNAKE_CASE , atol=1e-3 )
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
A = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
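# Example invocation (script name and checkpoint path are illustrative):
# python convert_swiftformer_original_to_hf.py --swiftformer_name swiftformer_xs \
#     --pytorch_dump_folder_path ./converted_outputs/ --original_ckpt ./swiftformer_xs.pth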
| 701
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Tuple , snake_case__ : str , snake_case__ : int = 1_3 , snake_case__ : int = 6_4 , snake_case__ : int = 2 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : int = 1_2_8 , snake_case__ : Optional[int]=[1_6, 3_2, 6_4, 1_2_8] , snake_case__ : int = 7 , snake_case__ : int = 4 , snake_case__ : int = 3_7 , snake_case__ : str = "gelu" , snake_case__ : float = 0.1 , snake_case__ : float = 0.1 , snake_case__ : int = 1_0 , snake_case__ : float = 0.02 , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : int = 1_2_8 , snake_case__ : List[int] = [2, 2, 2, 2] , snake_case__ : int = 2 , snake_case__ : int = 2 , ) -> Optional[Any]:
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = image_size
_lowerCamelCase = patch_size
_lowerCamelCase = num_channels
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = encoder_stride
_lowerCamelCase = num_attention_outputs
_lowerCamelCase = embed_dim
_lowerCamelCase = embed_dim + 1
_lowerCamelCase = resolution
_lowerCamelCase = depths
_lowerCamelCase = hidden_sizes
_lowerCamelCase = dim
_lowerCamelCase = mlp_expansion_ratio
def _snake_case ( self : Union[str, Any] ) -> List[str]:
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : Union[str, Any] ) -> int:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def _snake_case ( self : str , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[str] ) -> Optional[int]:
_lowerCamelCase = TFEfficientFormerModel(config=snake_case__ )
_lowerCamelCase = model(snake_case__ , training=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : Any , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Tuple ) -> Optional[Any]:
_lowerCamelCase = self.type_sequence_label_size
_lowerCamelCase = TFEfficientFormerForImageClassification(snake_case__ )
_lowerCamelCase = model(snake_case__ , labels=snake_case__ , training=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCamelCase = 1
_lowerCamelCase = TFEfficientFormerForImageClassification(snake_case__ )
_lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self : Dict ) -> List[str]:
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def _snake_case ( self : Optional[Any] ) -> Any:
_lowerCamelCase = TFEfficientFormerModelTester(self )
_lowerCamelCase = ConfigTester(
self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 )
def _snake_case ( self : Dict ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def _snake_case ( self : str ) -> Union[str, Any]:
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def _snake_case ( self : Optional[int] ) -> List[str]:
pass
def _snake_case ( self : Any ) -> List[Any]:
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(snake_case__ )
_lowerCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case__ )
def _snake_case ( self : int ) -> int:
def check_hidden_states_output(snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : str ):
_lowerCamelCase = model_class(snake_case__ )
_lowerCamelCase = model(**self._prepare_for_class(snake_case__ , snake_case__ ) , training=snake_case__ )
_lowerCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCamelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case__ ) , snake_case__ )
if hasattr(self.model_tester , 'encoder_seq_length' ):
_lowerCamelCase = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1:
_lowerCamelCase = seq_length * self.model_tester.chunk_length
else:
_lowerCamelCase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
_lowerCamelCase = outputs.decoder_hidden_states
self.assertIsInstance(snake_case__ , (list, tuple) )
self.assertEqual(len(snake_case__ ) , snake_case__ )
_lowerCamelCase = getattr(self.model_tester , 'seq_length' , snake_case__ )
_lowerCamelCase = getattr(self.model_tester , 'decoder_seq_length' , snake_case__ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def _snake_case ( self : Dict , snake_case__ : int , snake_case__ : str , snake_case__ : List[str]=False ) -> List[Any]:
_lowerCamelCase = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _snake_case ( self : Optional[Any] ) -> Dict:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def _snake_case ( self : str ) -> Tuple:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ )
def _snake_case ( self : Optional[Any] ) -> Optional[Any]:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@slow
def _snake_case ( self : Union[str, Any] ) -> Any:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = TFEfficientFormerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def _snake_case ( self : List[Any] ) -> int:
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = True
_lowerCamelCase = getattr(self.model_tester , 'seq_length' , snake_case__ )
_lowerCamelCase = getattr(self.model_tester , 'encoder_seq_length' , snake_case__ )
_lowerCamelCase = getattr(self.model_tester , 'key_length' , snake_case__ )
_lowerCamelCase = getattr(self.model_tester , 'chunk_length' , snake_case__ )
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ):
_lowerCamelCase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = True
_lowerCamelCase = model_class(snake_case__ )
_lowerCamelCase = model(**self._prepare_for_class(snake_case__ , snake_case__ ) , training=snake_case__ )
_lowerCamelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCamelCase = True
_lowerCamelCase = model_class(snake_case__ )
_lowerCamelCase = model(**self._prepare_for_class(snake_case__ , snake_case__ ) , training=snake_case__ )
_lowerCamelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def _snake_case ( self : Any ) -> Union[str, Any]:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
_lowerCamelCase = model_class(snake_case__ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
_lowerCamelCase = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=snake_case__ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
_lowerCamelCase = model(snake_case__ )
self.assertTrue(outputs_dict is not None )
def lowerCamelCase ( ) -> Optional[int]:
_lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return _lowerCamelCase
@require_tf
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : List[str] ) -> Tuple:
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def _snake_case ( self : List[str] ) -> List[Any]:
_lowerCamelCase = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=snake_case__ , return_tensors='tf' )
# forward pass
_lowerCamelCase = model(**snake_case__ , training=snake_case__ )
# verify the logits
_lowerCamelCase = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case__ )
_lowerCamelCase = tf.constant([-0.0555, 0.4825, -0.0852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) )
@slow
def _snake_case ( self : List[Any] ) -> Optional[Any]:
_lowerCamelCase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300' )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=snake_case__ , return_tensors='tf' )
# forward pass
_lowerCamelCase = model(**snake_case__ , training=snake_case__ )
# verify the logits
_lowerCamelCase = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case__ )
_lowerCamelCase = tf.constant([-0.1312, 0.4353, -1.0499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) )
| 234
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class _a :
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int]=13 , SCREAMING_SNAKE_CASE__ : Any=7 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : List[str]=99 , SCREAMING_SNAKE_CASE__ : Tuple=[1, 1, 2] , SCREAMING_SNAKE_CASE__ : Optional[int]=1 , SCREAMING_SNAKE_CASE__ : int=32 , SCREAMING_SNAKE_CASE__ : List[str]=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=37 , SCREAMING_SNAKE_CASE__ : Dict="gelu_new" , SCREAMING_SNAKE_CASE__ : List[str]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.0 , SCREAMING_SNAKE_CASE__ : List[Any]=5_12 , SCREAMING_SNAKE_CASE__ : List[Any]=3 , SCREAMING_SNAKE_CASE__ : int=0.02 , SCREAMING_SNAKE_CASE__ : Any=3 , SCREAMING_SNAKE_CASE__ : List[Any]=4 , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : List[str]=False , ):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_token_type_ids
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = block_sizes
lowerCamelCase__ = num_decoder_layers
lowerCamelCase__ = d_model
lowerCamelCase__ = n_head
lowerCamelCase__ = d_head
lowerCamelCase__ = d_inner
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = activation_dropout
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = 2
lowerCamelCase__ = num_labels
lowerCamelCase__ = num_choices
lowerCamelCase__ = scope
lowerCamelCase__ = initializer_std
# Used in the tests to check the size of the first attention layer
lowerCamelCase__ = n_head
# Used in the tests to check the size of the first hidden state
lowerCamelCase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowerCamelCase__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowerCamelCase__ = self.num_hidden_layers + 2
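# Worked example (an illustrative sketch using this tester's defaults; tying
# the obfuscated names above to the hidden-state count is an assumption):
# with block_sizes=[1, 1, 2] the base model exposes sum([1, 1, 2]) = 4
# hidden states, the full model 4 + num_decoder_layers = 5, and the two
# extra entries noted above bring the expected total to 5 + 2 = 7.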
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
if self.use_token_type_ids:
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str , ):
lowerCamelCase__ = TFFunnelModel(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = [input_ids, input_mask]
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCamelCase__ = False
lowerCamelCase__ = TFFunnelModel(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCamelCase__ = False
lowerCamelCase__ = TFFunnelModel(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , ):
lowerCamelCase__ = TFFunnelBaseModel(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = [input_ids, input_mask]
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
lowerCamelCase__ = False
lowerCamelCase__ = TFFunnelBaseModel(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
lowerCamelCase__ = False
lowerCamelCase__ = TFFunnelBaseModel(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , ):
lowerCamelCase__ = TFFunnelForPreTraining(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , ):
lowerCamelCase__ = TFFunnelForMaskedLM(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = TFFunnelForSequenceClassification(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , ):
lowerCamelCase__ = self.num_choices
lowerCamelCase__ = TFFunnelForMultipleChoice(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = TFFunnelForTokenClassification(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , ):
lowerCamelCase__ = TFFunnelForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class _a ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : List[str] = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
a_ : Optional[Any] = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ : Any = False
a_ : Optional[int] = False
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = TFFunnelModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__ )
@require_tf
class _a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : int = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
a_ : Dict = False
a_ : Union[str, Any] = False
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = TFFunnelModelTester(self , base=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE__ )
| 510
|
"""simple docstring"""
def snake_case ( list_data: list , length: int = 0 )-> list:
    '''simple docstring'''
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else snake_case(list_data , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
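# Minimal usage sketch (the sample list below is illustrative, not taken
# from the original module):
if __name__ == "__main__":
    print(snake_case([5, 1, 4, 2, 8] ) )  # prints [1, 2, 4, 5, 8]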
| 510
| 1
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class a__ :
def __init__( self : Union[str, Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Any=1_3 , lowerCamelCase_ : Tuple=6_4 , lowerCamelCase_ : Any=2 , lowerCamelCase_ : Optional[Any]=3 , lowerCamelCase_ : Any=True , lowerCamelCase_ : Dict=True , lowerCamelCase_ : Tuple=3_2 , lowerCamelCase_ : str=5 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Dict=3_7 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : Dict=0.1 , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : List[str]=1_0 , lowerCamelCase_ : int=0.0_2 , lowerCamelCase_ : int=[1, 1_6, 4, 4] , lowerCamelCase_ : Any=None , ):
a_ : str = parent
a_ : Tuple = batch_size
a_ : List[Any] = image_size
a_ : Optional[int] = patch_size
a_ : Union[str, Any] = num_channels
a_ : Any = is_training
a_ : List[Any] = use_labels
a_ : Optional[Any] = hidden_size
a_ : Optional[int] = num_hidden_layers
a_ : Optional[int] = num_attention_heads
a_ : Dict = intermediate_size
a_ : Any = hidden_act
a_ : Union[str, Any] = hidden_dropout_prob
a_ : Optional[int] = attention_probs_dropout_prob
a_ : Dict = type_sequence_label_size
a_ : List[str] = initializer_range
a_ : Union[str, Any] = scope
a_ : Any = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, so the feature map has a spatial resolution of 1/32 of the input image size
a_ : Tuple = (self.image_size // 3_2) ** 2
a_ : Dict = num_patches + 1
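# Worked example (an illustrative sketch using this tester's image_size=64
# default): the backbone feature map is 64 // 32 = 2 pixels per side, so
# there are (64 // 32) ** 2 = 4 patches and the sequence length becomes
# 4 + 1 = 5 once the [CLS] token is added.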
def UpperCAmelCase( self : List[Any] ):
a_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ : Dict = None
if self.use_labels:
a_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : List[str] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase( self : Union[str, Any] ):
a_ : Optional[Any] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [4, 8, 1_6, 3_2],
"""num_groups""": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=lowerCamelCase_ , )
def UpperCAmelCase( self : Optional[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any ):
a_ : List[str] = ViTHybridModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a_ : int = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase( self : List[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] ):
a_ : List[str] = self.type_sequence_label_size
a_ : int = ViTHybridForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a_ : Optional[Any] = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase( self : Union[str, Any] ):
a_ : Any = self.prepare_config_and_inputs()
a_ , a_ , a_ : Optional[Any] = config_and_inputs
a_ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a__ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
lowerCamelCase__: Optional[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
lowerCamelCase__: str = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__: List[Any] = False
lowerCamelCase__: Optional[Any] = False
lowerCamelCase__: Optional[Any] = False
def UpperCAmelCase( self : Optional[Any] ):
a_ : Dict = ViTHybridModelTester(self )
a_ : Any = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=3_7 )
def UpperCAmelCase( self : str ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def UpperCAmelCase( self : Optional[Any] ):
pass
def UpperCAmelCase( self : Any ):
a_ , a_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : List[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a_ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) )
def UpperCAmelCase( self : Optional[Any] ):
a_ , a_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : List[str] = model_class(lowerCamelCase_ )
a_ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ : Optional[int] = [*signature.parameters.keys()]
a_ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def UpperCAmelCase( self : List[Any] ):
a_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def UpperCAmelCase( self : Tuple ):
a_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def UpperCAmelCase( self : Union[str, Any] ):
a_ , a_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
a_ : Union[str, Any] = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
a_ : Tuple = model_class(config=lowerCamelCase_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
a_ : Optional[int] = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def UpperCAmelCase( self : Dict ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : str = ViTHybridModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def _a ( ):
a_ : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return a_
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase( self : Optional[int] ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase( self : str ):
a_ : str = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
lowerCamelCase_ )
a_ : int = self.default_image_processor
a_ : List[Any] = prepare_img()
a_ : str = image_processor(images=lowerCamelCase_ , return_tensors="""pt""" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
a_ : Tuple = model(**lowerCamelCase_ )
# verify the logits
a_ : Tuple = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
a_ : List[str] = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
@slow
@require_accelerate
def UpperCAmelCase( self : Optional[Any] ):
a_ : List[Any] = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" )
a_ : str = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" , device_map="""auto""" )
a_ : Dict = prepare_img()
a_ : Dict = image_processor(images=lowerCamelCase_ , return_tensors="""pt""" )
a_ : Optional[Any] = model(**lowerCamelCase_ )
a_ : List[str] = outputs.logits
# model predicts one of the 1000 ImageNet classes
a_ : List[str] = logits.argmax(-1 ).item()
self.assertEqual(model.config.idalabel[predicted_class_idx] , """tabby, tabby cat""" )
| 478
|
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
__lowerCamelCase = False
__lowerCamelCase = False
def _a ( __UpperCamelCase ):
return TrainCommand(__UpperCamelCase )
class a__ ( lowerCAmelCase_ ):
@staticmethod
def UpperCAmelCase( lowerCamelCase_ : ArgumentParser ):
a_ : List[Any] = parser.add_parser("""train""" , help="""CLI tool to train a model on a task.""" )
train_parser.add_argument(
"""--train_data""" , type=lowerCamelCase_ , required=lowerCamelCase_ , help="""path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.""" , )
train_parser.add_argument(
"""--column_label""" , type=lowerCamelCase_ , default=0 , help="""Column of the dataset csv file with example labels.""" )
train_parser.add_argument(
"""--column_text""" , type=lowerCamelCase_ , default=1 , help="""Column of the dataset csv file with example texts.""" )
train_parser.add_argument(
"""--column_id""" , type=lowerCamelCase_ , default=2 , help="""Column of the dataset csv file with example ids.""" )
train_parser.add_argument(
"""--skip_first_row""" , action="""store_true""" , help="""Skip the first row of the csv file (headers).""" )
train_parser.add_argument("""--validation_data""" , type=lowerCamelCase_ , default="""""" , help="""path to validation dataset.""" )
train_parser.add_argument(
"""--validation_split""" , type=lowerCamelCase_ , default=0.1 , help="""if validation dataset is not provided, fraction of train dataset to use as validation dataset.""" , )
train_parser.add_argument("""--output""" , type=lowerCamelCase_ , default="""./""" , help="""path to saved the trained model.""" )
train_parser.add_argument(
"""--task""" , type=lowerCamelCase_ , default="""text_classification""" , help="""Task to train the model on.""" )
train_parser.add_argument(
"""--model""" , type=lowerCamelCase_ , default="""bert-base-uncased""" , help="""Model's name or path to stored model.""" )
train_parser.add_argument("""--train_batch_size""" , type=lowerCamelCase_ , default=3_2 , help="""Batch size for training.""" )
train_parser.add_argument("""--valid_batch_size""" , type=lowerCamelCase_ , default=6_4 , help="""Batch size for validation.""" )
train_parser.add_argument("""--learning_rate""" , type=lowerCamelCase_ , default=3E-5 , help="""Learning rate.""" )
train_parser.add_argument("""--adam_epsilon""" , type=lowerCamelCase_ , default=1E-08 , help="""Epsilon for Adam optimizer.""" )
train_parser.set_defaults(func=lowerCamelCase_ )
def __init__( self : int , lowerCamelCase_ : Namespace ):
a_ : Optional[int] = logging.get_logger("""transformers-cli/training""" )
a_ : List[str] = """tf""" if is_tf_available() else """torch"""
os.makedirs(args.output , exist_ok=lowerCamelCase_ )
a_ : List[str] = args.output
a_ : Optional[int] = args.column_label
a_ : Tuple = args.column_text
a_ : Optional[int] = args.column_id
self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
a_ : str = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F'''Loading dataset from {args.train_data}''' )
a_ : int = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
a_ : Union[str, Any] = None
if args.validation_data:
self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
a_ : Tuple = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
a_ : str = args.validation_split
a_ : Tuple = args.train_batch_size
a_ : List[Any] = args.valid_batch_size
a_ : str = args.learning_rate
a_ : str = args.adam_epsilon
def UpperCAmelCase( self : List[str] ):
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def UpperCAmelCase( self : Any ):
raise NotImplementedError
def UpperCAmelCase( self : List[str] ):
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
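# Hedged CLI sketch (flag names mirror the add_argument calls above; the
# transformers-cli entry point and the file names are assumptions):
#     transformers-cli train --train_data train.csv --column_label 0 \
#         --column_text 1 --task text_classification --output ./model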
| 478
| 1
|
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __UpperCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
@register_to_config
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False , ):
super().__init__()
lowerCAmelCase_ = nn.Embedding(lowercase_ , lowercase_ )
lowerCAmelCase_ = nn.Embedding(lowercase_ , lowercase_ )
lowerCAmelCase_ = False
lowerCAmelCase_ = nn.Dropout(p=lowercase_ )
lowerCAmelCase_ = TaConfig(
vocab_size=lowercase_ , d_model=lowercase_ , num_heads=lowercase_ , d_kv=lowercase_ , d_ff=lowercase_ , dropout_rate=lowercase_ , feed_forward_proj=lowercase_ , is_decoder=lowercase_ , is_encoder_decoder=lowercase_ , )
lowerCAmelCase_ = nn.ModuleList()
for lyr_num in range(lowercase_ ):
lowerCAmelCase_ = TaBlock(lowercase_ )
self.encoders.append(lowercase_ )
lowerCAmelCase_ = TaLayerNorm(lowercase_ )
lowerCAmelCase_ = nn.Dropout(p=lowercase_ )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ):
lowerCAmelCase_ = self.token_embedder(lowercase_ )
lowerCAmelCase_ = encoder_input_tokens.shape[1]
lowerCAmelCase_ = torch.arange(lowercase_ , device=encoder_input_tokens.device )
lowerCAmelCase_ += self.position_encoding(lowercase_ )
lowerCAmelCase_ = self.dropout_pre(lowercase_ )
# invert the attention mask
lowerCAmelCase_ = encoder_input_tokens.size()
lowerCAmelCase_ = self.get_extended_attention_mask(lowercase_ , lowercase_ )
for lyr in self.encoders:
lowerCAmelCase_ = lyr(lowercase_ , lowercase_ )[0]
lowerCAmelCase_ = self.layer_norm(lowercase_ )
return self.dropout_post(lowercase_ ), encoder_inputs_mask
| 274
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class _UpperCAmelCase :
    def __init__( self , lowercase_ ) -> None:
        self.value = lowercase_
        self.left = None
        self.right = None


class _UpperCAmelCaseTree :
    def __init__( self , lowercase_ ) -> None:
        self.tree = lowercase_

    def depth_first_search( self , lowercase_ ) -> int:
        if lowercase_ is None:
            return 0
        return lowercase_.value + (
            self.depth_first_search(lowercase_.left ) + self.depth_first_search(lowercase_.right )
        )

    def __iter__( self ) -> Iterator[int]:
        yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
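# Minimal usage sketch (tree shape and values are illustrative assumptions,
# not part of the original module):
if __name__ == "__main__":
    root = _UpperCAmelCase(10 )
    root.left = _UpperCAmelCase(5 )
    root.right = _UpperCAmelCase(-3 )
    print(next(iter(_UpperCAmelCaseTree(root ) ) ) )  # prints 12, the node-value sum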
| 373
| 0
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
A__ : Union[str, Any] = "altclip_text_model"
def __init__( self , SCREAMING_SNAKE_CASE__=250002 , SCREAMING_SNAKE_CASE__=1024 , SCREAMING_SNAKE_CASE__=24 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=4096 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=514 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=0.0_2 , SCREAMING_SNAKE_CASE__=0.0_2 , SCREAMING_SNAKE_CASE__=1e-05 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__="absolute" , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=768 , **SCREAMING_SNAKE_CASE__ , ) -> Dict:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = initializer_factor
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = project_dim
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
A__ : List[Any] = "altclip_vision_model"
def __init__( self , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__=512 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=224 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__="quick_gelu" , SCREAMING_SNAKE_CASE__=1e-5 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0_2 , SCREAMING_SNAKE_CASE__=1.0 , **SCREAMING_SNAKE_CASE__ , ) -> List[Any]:
super().__init__(**SCREAMING_SNAKE_CASE__ )
A__ = hidden_size
A__ = intermediate_size
A__ = projection_dim
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = num_channels
A__ = patch_size
A__ = image_size
A__ = initializer_range
A__ = initializer_factor
A__ = attention_dropout
A__ = layer_norm_eps
A__ = hidden_act
@classmethod
def snake_case__ ( cls , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ )
A__ , A__ = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("model_type" ) == "altclip":
A__ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
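# Hedged usage sketch (the checkpoint id is taken from the config mapping
# above; "the vision config class" means the class defined just above, whose
# obfuscated name is shared by its siblings in this snippet):
#     vision_cfg = <vision config class>.from_pretrained("BAAI/AltCLIP" )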
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
A__ : Any = "altclip"
A__ : str = True
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=2.6_5_9_2 , **SCREAMING_SNAKE_CASE__ ) -> Any:
# If the `_config_dict`s exist, we use them for backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
A__ = kwargs.pop("text_config_dict" , SCREAMING_SNAKE_CASE__ )
A__ = kwargs.pop("vision_config_dict" , SCREAMING_SNAKE_CASE__ )
super().__init__(**SCREAMING_SNAKE_CASE__ )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
A__ = {}
# This is the complete result when using `text_config_dict`.
A__ = AltCLIPTextConfig(**SCREAMING_SNAKE_CASE__ ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but differ.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
A__ = (
f"""`{key}` is found in both `text_config_dict` and `text_config` but with different values. """
f"""The value `text_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
A__ = (
f"""`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The """
f"""value `text_config[\"{key}\"]` will be overriden."""
)
logger.warning(SCREAMING_SNAKE_CASE__ )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
A__ = {}
# This is the complete result when using `vision_config_dict`.
A__ = AltCLIPVisionConfig(**SCREAMING_SNAKE_CASE__ ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
A__ = {
str(SCREAMING_SNAKE_CASE__ ): value for key, value in _vision_config_dict["id2label"].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but differ.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
A__ = (
f"""`{key}` is found in both `vision_config_dict` and `vision_config` but with different """
f"""values. The value `vision_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
A__ = (
f"""`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. """
f"""The value `vision_config[\"{key}\"]` will be overriden."""
)
logger.warning(SCREAMING_SNAKE_CASE__ )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
A__ = {}
logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values." )
if vision_config is None:
A__ = {}
logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values." )
A__ = AltCLIPTextConfig(**SCREAMING_SNAKE_CASE__ )
A__ = AltCLIPVisionConfig(**SCREAMING_SNAKE_CASE__ )
A__ = projection_dim
A__ = logit_scale_init_value
A__ = 1.0
@classmethod
def snake_case__ ( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Dict:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self ) -> Union[str, Any]:
A__ = copy.deepcopy(self.__dict__ )
A__ = self.text_config.to_dict()
A__ = self.vision_config.to_dict()
A__ = self.__class__.model_type
return output
| 562
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
A__ : torch.FloatTensor
class UpperCamelCase__ ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , SCREAMING_SNAKE_CASE__ = 65536 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 2 , SCREAMING_SNAKE_CASE__ = 2 , SCREAMING_SNAKE_CASE__ = 0 , SCREAMING_SNAKE_CASE__ = "fourier" , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = 0.0 , SCREAMING_SNAKE_CASE__ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , SCREAMING_SNAKE_CASE__ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , SCREAMING_SNAKE_CASE__ = "UNetMidBlock1D" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = (32, 32, 64) , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 8 , SCREAMING_SNAKE_CASE__ = 1 , SCREAMING_SNAKE_CASE__ = False , ) -> Union[str, Any]:
super().__init__()
A__ = sample_size
# time
if time_embedding_type == "fourier":
A__ = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=SCREAMING_SNAKE_CASE__ , log=SCREAMING_SNAKE_CASE__ , flip_sin_to_cos=SCREAMING_SNAKE_CASE__ )
A__ = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
A__ = Timesteps(
block_out_channels[0] , flip_sin_to_cos=SCREAMING_SNAKE_CASE__ , downscale_freq_shift=SCREAMING_SNAKE_CASE__ )
A__ = block_out_channels[0]
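# Worked example (an illustrative sketch using the defaults above): with
# block_out_channels=(32, 32, 64), the "fourier" branch yields a timestep
# input of size 2 * 32 = 64 and the "positional" branch one of size 32; if
# use_timestep_embedding is set below, the MLP projects to 32 * 4 = 128.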
if use_timestep_embedding:
A__ = block_out_channels[0] * 4
A__ = TimestepEmbedding(
in_channels=SCREAMING_SNAKE_CASE__ , time_embed_dim=SCREAMING_SNAKE_CASE__ , act_fn=SCREAMING_SNAKE_CASE__ , out_dim=block_out_channels[0] , )
A__ = nn.ModuleList([] )
A__ = None
A__ = nn.ModuleList([] )
A__ = None
# down
A__ = in_channels
for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE__ ):
A__ = output_channel
A__ = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
A__ = i == len(SCREAMING_SNAKE_CASE__ ) - 1
A__ = get_down_block(
SCREAMING_SNAKE_CASE__ , num_layers=SCREAMING_SNAKE_CASE__ , in_channels=SCREAMING_SNAKE_CASE__ , out_channels=SCREAMING_SNAKE_CASE__ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(SCREAMING_SNAKE_CASE__ )
# mid
A__ = get_mid_block(
SCREAMING_SNAKE_CASE__ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=SCREAMING_SNAKE_CASE__ , add_downsample=SCREAMING_SNAKE_CASE__ , )
# up
A__ = list(reversed(SCREAMING_SNAKE_CASE__ ) )
A__ = reversed_block_out_channels[0]
if out_block_type is None:
A__ = out_channels
else:
A__ = block_out_channels[0]
for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE__ ):
A__ = output_channel
A__ = (
reversed_block_out_channels[i + 1] if i < len(SCREAMING_SNAKE_CASE__ ) - 1 else final_upsample_channels
)
A__ = i == len(SCREAMING_SNAKE_CASE__ ) - 1
A__ = get_up_block(
SCREAMING_SNAKE_CASE__ , num_layers=SCREAMING_SNAKE_CASE__ , in_channels=SCREAMING_SNAKE_CASE__ , out_channels=SCREAMING_SNAKE_CASE__ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(SCREAMING_SNAKE_CASE__ )
A__ = output_channel
# out
A__ = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
A__ = get_out_block(
out_block_type=SCREAMING_SNAKE_CASE__ , num_groups_out=SCREAMING_SNAKE_CASE__ , embed_dim=block_out_channels[0] , out_channels=SCREAMING_SNAKE_CASE__ , act_fn=SCREAMING_SNAKE_CASE__ , fc_dim=block_out_channels[-1] // 4 , )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = True , ) -> Union[UNetaDOutput, Tuple]:
A__ = timestep
if not torch.is_tensor(SCREAMING_SNAKE_CASE__ ):
A__ = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(SCREAMING_SNAKE_CASE__ ) and len(timesteps.shape ) == 0:
A__ = timesteps[None].to(sample.device )
A__ = self.time_proj(SCREAMING_SNAKE_CASE__ )
if self.config.use_timestep_embedding:
A__ = self.time_mlp(SCREAMING_SNAKE_CASE__ )
else:
A__ = timestep_embed[..., None]
A__ = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
A__ = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
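# Shape sketch (illustrative): a plain-Python timestep is first lifted to a
# 1-element tensor, projected by time_proj, then repeated along a trailing
# length axis and broadcast per batch so it lines up with the sample that
# the down blocks consume.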
# 2. down
A__ = ()
for downsample_block in self.down_blocks:
A__ , A__ = downsample_block(hidden_states=SCREAMING_SNAKE_CASE__ , temb=SCREAMING_SNAKE_CASE__ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
A__ = self.mid_block(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
A__ = down_block_res_samples[-1:]
A__ = down_block_res_samples[:-1]
A__ = upsample_block(SCREAMING_SNAKE_CASE__ , res_hidden_states_tuple=SCREAMING_SNAKE_CASE__ , temb=SCREAMING_SNAKE_CASE__ )
# 5. post-process
if self.out_block:
A__ = self.out_block(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=SCREAMING_SNAKE_CASE__ )
| 562
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __a ( snake_case_ ):
'''simple docstring'''
_lowerCamelCase : List[str] = """levit"""
def __init__( self , _lowerCamelCase=224 , _lowerCamelCase=3 , _lowerCamelCase=3 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=16 , _lowerCamelCase=[128, 256, 384] , _lowerCamelCase=[4, 8, 12] , _lowerCamelCase=[4, 4, 4] , _lowerCamelCase=[16, 16, 16] , _lowerCamelCase=0 , _lowerCamelCase=[2, 2, 2] , _lowerCamelCase=[2, 2, 2] , _lowerCamelCase=0.02 , **_lowerCamelCase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_lowerCamelCase )
__lowercase = image_size
__lowercase = num_channels
__lowercase = kernel_size
__lowercase = stride
__lowercase = padding
__lowercase = hidden_sizes
__lowercase = num_attention_heads
__lowercase = depths
__lowercase = key_dim
__lowercase = drop_path_rate
__lowercase = patch_size
__lowercase = attention_ratio
__lowercase = mlp_ratio
__lowercase = initializer_range
__lowercase = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
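# Worked example (an illustrative sketch using the defaults above): with
# key_dim=[16, 16, 16] and hidden_sizes=[128, 256, 384], the two subsample
# stages use 128 // 16 = 8 and 256 // 16 = 16 attention heads respectively,
# which is what the two "Subsample" entries above encode.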
class __a ( snake_case_ ):
'''simple docstring'''
_lowerCamelCase : Any = version.parse("""1.11""" )
@property
def SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def SCREAMING_SNAKE_CASE ( self ) -> float:
'''simple docstring'''
return 1e-4
| 118
|
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__A =logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = ['input_values', 'attention_mask']
def __init__( self , lowercase = 1 , lowercase = 16000 , lowercase = 0.0 , lowercase = False , lowercase = 80 , lowercase = 16 , lowercase = 64 , lowercase = "hann_window" , lowercase = 1.0 , lowercase = 80 , lowercase = 7600 , lowercase = 1e-10 , lowercase = 2 , lowercase = True , **lowercase , ) -> int:
super().__init__(feature_size=lowercase , sampling_rate=lowercase , padding_value=lowercase , **lowercase )
lowerCamelCase_ = do_normalize
lowerCamelCase_ = return_attention_mask
lowerCamelCase_ = num_mel_bins
lowerCamelCase_ = hop_length
lowerCamelCase_ = win_length
lowerCamelCase_ = win_function
lowerCamelCase_ = frame_signal_scale
lowerCamelCase_ = fmin
lowerCamelCase_ = fmax
lowerCamelCase_ = mel_floor
lowerCamelCase_ = reduction_factor
lowerCamelCase_ = win_length * sampling_rate // 1000
lowerCamelCase_ = hop_length * sampling_rate // 1000
lowerCamelCase_ = optimal_fft_length(self.sample_size )
lowerCamelCase_ = (self.n_fft // 2) + 1
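# Worked example (an illustrative sketch using the defaults above): at
# sampling_rate=16000, win_length=64 ms and hop_length=16 ms become
# 64 * 16000 // 1000 = 1024 and 16 * 16000 // 1000 = 256 samples;
# optimal_fft_length(1024) keeps n_fft at 1024, so there are
# (1024 // 2) + 1 = 513 frequency bins.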
lowerCamelCase_ = window_function(window_length=self.sample_size , name=self.win_function , periodic=lowercase )
lowerCamelCase_ = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , )
if frame_signal_scale != 1.0:
warnings.warn(
"The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , lowercase , )
if reduction_factor != 2.0:
warnings.warn(
"The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , lowercase , )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def _extract_mel_features(self, one_waveform):
        log_mel_spec = spectrogram(
            one_waveform, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel="log10", )
        return log_mel_spec.T
    def __call__(self, audio=None, audio_target=None, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, sampling_rate=None, **kwargs) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")

        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, )
            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def _process_audio(self, speech, is_target=False, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, **kwargs) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
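# Minimal usage sketch appended for illustration (not part of the original file):
# extract padded waveform inputs and log-mel targets from one second of silence.
if __name__ == "__main__":
    _extractor = SpeechT5FeatureExtractor()
    _wave = np.zeros(16000, dtype=np.float32)  # one second of audio at 16 kHz
    _features = _extractor(audio=_wave, audio_target=_wave, sampling_rate=16000)
    # "input_values" holds the raw waveform, "labels" the (n_frames, 80) log-mel targets
    print(len(_features["input_values"][0]), _features["labels"][0].shape)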
| 463
| 0
|
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of distinct prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """True when every element of the iterable is equal (or it is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first n consecutive integers that each have n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Project Euler 47: first of the first n consecutive integers with n distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
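# Quick check appended for illustration (not part of the original file): 14 = 2 * 7
# and 15 = 3 * 5 form the first consecutive pair with two distinct prime factors each.
assert solution(2) == 14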
| 710
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels,
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels,
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels,
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device, )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
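# Note appended for illustration (not part of the original file): boxes above are
# (x1, y1, x2, y2); the two swaps in `prepare_config_and_inputs` enforce x1 <= x2 and
# y1 <= y2 so every random draw forms a legal rectangle, e.g. a drawn (5, 9, 2, 4)
# becomes (2, 4, 5, 9).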
| 372
| 0
|
"""Monte Carlo estimation of pi and of definite integrals."""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling uniform points in [-1, 1]^2 and counting hits inside the unit circle."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations) )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"""The estimated value of pi is {pi_estimate}""")
    print(f"""The numpy value of pi is {pi}""")
    print(f"""The total error is {abs(pi - pi_estimate)}""")


def area_under_curve_estimator(
    iterations: int, function_to_integrate: Callable[[float], float], min_value: float = 0.0, max_value: float = 1.0,
) -> float:
    """Estimate the integral of a function over [min_value, max_value] by uniform sampling."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """Check the estimator on y = x, whose integral has a closed form."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""")
    print(f"""Estimated value is {estimated_value}""")
    print(f"""Expected value is {expected_value}""")
    print(f"""Total error is {abs(estimated_value - expected_value)}""")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi via the integral of sqrt(4 - x^2) over [0, 2], which equals pi."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"""Estimated value is {estimated_value}""")
    print(f"""Expected value is {pi}""")
    print(f"""Total error is {abs(estimated_value - pi)}""")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
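# Usage sketch appended for illustration (not part of the original file); the
# estimates converge at the usual Monte Carlo rate of O(1/sqrt(iterations)):
# pi_estimator(100_000)                      # prints an estimate near 3.14
# area_under_line_estimator_check(100_000)   # integral of y=x on [0, 1] is 0.5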
| 78
|
def is_power_of_two(number: int) -> bool:
    """Return True if `number` is a power of two, using the n & (n - 1) bit trick."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
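# Worked example appended for illustration (not part of the original file): a power
# of two has exactly one set bit, so clearing the lowest set bit with n & (n - 1)
# must give zero. Note that 0 also passes the test under this definition.
assert is_power_of_two(8)       # 0b1000 & 0b0111 == 0
assert not is_power_of_two(6)   # 0b0110 & 0b0101 == 0b0100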
| 78
| 1
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    pad_to_max_length: bool = field(
        default=False, metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        }, )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag", lowerCamelCase__, lowerCamelCase__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_a = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase__ )
datasets.utils.logging.set_verbosity(lowerCamelCase__ )
transformers.utils.logging.set_verbosity(lowerCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
_a = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_a = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_a = {}
if data_args.train_file is not None:
_a = data_args.train_file
if data_args.validation_file is not None:
_a = data_args.validation_file
_a = data_args.train_file.split("." )[-1]
_a = load_dataset(
lowerCamelCase__, data_files=lowerCamelCase__, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
else:
# Downloading and loading the swag dataset from the hub.
_a = load_dataset(
"swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f'''ending{i}''' for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1_024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`." )
            max_seq_length = 1_024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
                f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding="max_length" if data_args.pad_to_max_length else False, )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
_a = raw_datasets["train"]
if data_args.max_train_samples is not None:
_a = min(len(lowerCamelCase__ ), data_args.max_train_samples )
_a = train_dataset.select(range(lowerCamelCase__ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
_a = train_dataset.map(
lowerCamelCase__, batched=lowerCamelCase__, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
_a = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
_a = min(len(lowerCamelCase__ ), data_args.max_eval_samples )
_a = eval_dataset.select(range(lowerCamelCase__ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
_a = eval_dataset.map(
lowerCamelCase__, batched=lowerCamelCase__, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
# Data collator
_a = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase__, pad_to_multiple_of=8 if training_args.fpaa else None )
)
    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
# Initialize our Trainer
_a = Trainer(
model=lowerCamelCase__, args=lowerCamelCase__, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=lowerCamelCase__, data_collator=lowerCamelCase__, compute_metrics=lowerCamelCase__, )
# Training
if training_args.do_train:
_a = None
if training_args.resume_from_checkpoint is not None:
_a = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_a = last_checkpoint
_a = trainer.train(resume_from_checkpoint=lowerCamelCase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
_a = train_result.metrics
_a = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase__ )
)
_a = min(lowerCamelCase__, len(lowerCamelCase__ ) )
trainer.log_metrics("train", lowerCamelCase__ )
trainer.save_metrics("train", lowerCamelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_a = trainer.evaluate()
_a = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase__ )
_a = min(lowerCamelCase__, len(lowerCamelCase__ ) )
trainer.log_metrics("eval", lowerCamelCase__ )
trainer.save_metrics("eval", lowerCamelCase__ )
_a = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase__ )
else:
trainer.create_model_card(**lowerCamelCase__ )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
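# Shape sketch appended for illustration (not part of the original file): each example
# carries 4 candidate endings, so features arrive as (batch_size, 4, seq_len); the
# collator flattens them to (batch_size * 4, seq_len) so tokenizer.pad sees ordinary
# sequences, then restores the choice axis with v.view(batch_size, num_choices, -1)
# before adding the labels back.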
| 717
|
deps = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
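# Illustrative consumer of the table above (an assumed usage pattern, not part of
# this file): a runtime check can enforce a pin at import time.
# from transformers.utils.versions import require_version
# require_version(deps["tokenizers"])  # raises if the installed version violates the pin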
| 691
| 0
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
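# Note appended for illustration (not part of the original file): the manual
# `samples_seen` bookkeeping in the eval loop above can be replaced by
# `Accelerator.gather_for_metrics`, which truncates the duplicated samples of the
# last distributed batch itself:
# predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
# metric.add_batch(predictions=predictions, references=references)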
| 418
|
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
# warning at import time
warnings.warn(
"Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
"be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead." , UpperCAmelCase__ , )
| 543
| 0
|
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list by sweeping alternately in both directions (bidirectional bubble sort)."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
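# Quick check appended for illustration (not part of the original file):
# cocktail_shaker_sort([4, 5, 2, 1, 2]) -> [1, 2, 2, 4, 5]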
| 705
|
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 419
| 0
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
__magic_name__ : Optional[Any] = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
__magic_name__ : Tuple = Accelerator(kwargs_handlers=[ddp_scaler])
__magic_name__ : Optional[Any] = torch.nn.Linear(100, 200)
__magic_name__ : List[str] = accelerator.prepare(model)
# Check the values changed in kwargs
__magic_name__ : Optional[int] = ''''''
__magic_name__ : Dict = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
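# Usage note appended for illustration (not part of the original file): like
# MockClass above, every KwargsHandler dataclass only forwards fields that differ
# from their defaults, e.g.
# DistributedDataParallelKwargs(bucket_cap_mb=15).to_kwargs() -> {"bucket_cap_mb": 15}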
| 280
|
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, )

        # start training
        trainer.train()
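# Note appended for illustration (not part of the original file): pad-token positions
# in "labels" are replaced by -100 above because that is the index ignored by
# PyTorch's CrossEntropyLoss, so padding contributes nothing to the seq2seq loss.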
| 280
| 1
|
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
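# Timeline sketch appended for illustration (not part of the original file): an issue
# untouched for 23+ days (and at least 30 days old) gets the Stalebot notification and
# the "stale" label; after 7 more days of silence it is closed, while any non-bot
# comment reopens it and removes the label.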
| 237
|
"""simple docstring"""
lowerCamelCase : int =[0, 2, 4, 6, 8]
lowerCamelCase : List[str] =[1, 3, 5, 7, 9]
def _lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : int ) -> int:
'''simple docstring'''
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
__A : Union[str, Any] = 0
for digit in range(10 ):
__A : Dict = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return result
__A : Union[str, Any] = 0
for digita in range(10 ):
__A : Tuple = digita
if (remainder + digita) % 2 == 0:
__A : Union[str, Any] = ODD_DIGITS
else:
__A : Optional[int] = EVEN_DIGITS
for digita in other_parity_digits:
__A : Union[str, Any] = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
return result
def _lowercase ( _SCREAMING_SNAKE_CASE : int = 9 ) -> int:
'''simple docstring'''
__A : Tuple = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(_SCREAMING_SNAKE_CASE , 0 , [0] * length , _SCREAMING_SNAKE_CASE )
return result
if __name__ == "__main__":
print(F'{solution() = }')
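# Sanity note appended for illustration (not part of the original file): Project
# Euler 145 states there are 120 reversible numbers below one thousand, i.e.
# sum(reversible_numbers(n, 0, [0] * n, n) for n in range(1, 4)) == 120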
| 237
| 1
|
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
lowercase_ = logging.getLogger(__name__)
class __A ( A ):
'''simple docstring'''
def __init__(self , A , A , A , A=None ) -> str:
"""simple docstring"""
super().__init__(
A , question_encoder_tokenizer=A , generator_tokenizer=A , index=A , init_retrieval=A , )
_a = None
    def init_retrieval(self, distributed_port: int):
        """Retriever initialization function, needs to be called from the training process."""
        logger.info("initializing retrieval")
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)
    def _is_main(self) -> bool:
        return dist.get_rank(group=self.process_group) == 0
    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor
    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname
    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        """Retrieves documents for the specified ``question_hidden_states``."""
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
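    # Flow summary (descriptive note): every worker sends its query vectors to
    # the main worker via gather, the main worker runs a single index lookup
    # for the whole batch, and the per-worker slices of doc ids / embeddings
    # are sent back via scatter.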
| 11
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
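# Usage note (a sketch, assuming the shared helpers' behavior): running
# `pytest --make-reports=<id> tests/<test_file>.py` makes the terminal summary
# hook above write the grouped failure/duration report files for that run.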
| 121
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
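# Minimal usage sketch (the timm checkpoint name below is illustrative):
# config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))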
| 717
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"andreasmadsen/efficient_mlm_m0.40": (
"https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
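# Sketch of what the property yields for the default (non multiple-choice)
# task, assuming standard OnnxConfig behavior:
# OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#              ("attention_mask", {0: "batch", 1: "sequence"})])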
| 106
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 433
|
from math import sqrt
def solution(limit: int = 1000000) -> int:
    """
    Return the least max cuboid size M such that the number of cuboids up to
    M x M x M whose shortest surface route has integer length first exceeds
    ``limit``.
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
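# Illustrative check (figures from the Project Euler 86 statement, not
# computed here): exactly 2060 such cuboids exist up to M = 100, so
# solution(2059) should return 100.
# assert solution(2059) == 100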
if __name__ == "__main__":
print(f"""{solution() = }""")
| 17
| 0
|
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def html_progress_bar(value, total, prefix, label, width=300):
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = '<table border="1" class="dataframe">\n'
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"    <th>{i}</th>\n"
    html_code += "  </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(self, total, prefix=None, leave=True, parent=None, width=300):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update(self, value, force_update=False, comment=None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)
    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()
    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))
    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    "A tracker for the training progress with a table of metrics underneath."

    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None
    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))
    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])
    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar
    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    "A TrainerCallback that displays the progress of training or evaluation in a Jupyter notebook."

    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False
    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)
    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False
    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)
    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None
    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)
    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True
    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
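    # Descriptive note: the callback keeps a single NotebookTrainingTracker for
    # the training loop and attaches one child NotebookProgressBar per
    # evaluation pass, so both render inside the same notebook output cell.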
| 672
|
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 672
| 1
|
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """A graph defined by its vertices and weighted edges."""
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }
    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight
    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph: Graph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    """Find the maximum saving from the network described in the given file."""
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    filepath: str = os.path.join(script_dir, filename)

    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int

    with open(filepath) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
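# Context (from the Project Euler 107 statement): for the 7-vertex example
# network the total weight is 243 and the minimum spanning tree weighs 93,
# a maximum saving of 150; this function computes the same quantity for the
# 40-vertex network in p107_network.txt.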
if __name__ == "__main__":
print(f'''{solution() = }''')
| 461
|
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """Solve a 2x2 system of linear equations given as [a, b, c] for ax + by = c."""
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
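# Worked example: for 11x + 2y = 30 and x = 4 (i.e. [11, 2, 30] and
# [1, 0, 4]), determinant = -2, determinant_x = -8, determinant_y = 14,
# so the solution is (4.0, -7.0).
# assert cramers_rule_2x2([11, 2, 30], [1, 0, 4]) == (4.0, -7.0)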
| 461
| 1
|
'''simple docstring'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """Find the sum of an arithmetic series."""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
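# Worked example: first_term=1, common_diff=1, num_of_terms=10 gives
# (10 / 2) * (2 * 1 + (10 - 1) * 1) = 5 * 11 = 55.0.
# assert sum_of_series(1, 1, 10) == 55.0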
def main():
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 50
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 50
| 1
|
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
| 396
|
'''simple docstring'''
def is_isogram(string: str) -> bool:
    """An isogram is a word in which no letter is repeated."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
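# Usage examples:
# is_isogram("Uncopyrightable") -> True (no letter repeats)
# is_isogram("allowance")       -> False ("l" repeats)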
if __name__ == "__main__":
_UpperCamelCase : List[str] = input('Enter a string ').strip()
_UpperCamelCase : List[Any] = is_isogram(input_str)
print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 396
| 1
|
'''simple docstring'''
from __future__ import annotations
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        """Collect absolute URLs from anchor tags."""
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def get_domain_name(url: str) -> str:
    """Get the main domain name, keeping only the last two labels of the host."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])
def get_sub_domain_name(url: str) -> str:
    """Get the full sub-domain name (the netloc of the url)."""
    return parse.urlparse(url).netloc
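# Example: get_sub_domain_name("https://a.b.c.d/e/f?g=h#k") returns
# "a.b.c.d", and get_domain_name keeps only the last two labels: "c.d".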
def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Collect all email addresses found on pages linked from the given url."""
    # Get the base domain from the url
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
emails = emails_from_url("https://github.com")
print(F"{len(emails)} emails found:")
print('\n'.join(sorted(emails)))
| 712
|
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract time info from a single job in a GitHub Actions workflow run."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info
def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F"{k}: {v['duration']}")
| 499
| 0
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        # slice matches the 24 expected values listed above
        self.assertTrue(np.allclose(input_features[0, 0, :24], expected, atol=1e-4))
| 215
|
'''simple docstring'''
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args)

        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_eval_args)
| 215
| 1
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    """
    Constructs a CLIPSeg processor which wraps an image processor and a tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """Forwards all its arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all its arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
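# Minimal usage sketch (the checkpoint name is the public CLIPSeg one; the
# `image` variable is assumed to be a PIL.Image):
# processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
# inputs = processor(text=["a cat"], images=image, return_tensors="pt")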
| 719
|
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """
    Apply Ohm's Law, V = I * R, on any two given electrical values and return
    the third one in a Python dict.
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
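# Worked example: with voltage=10 and current=5 the unknown resistance is
# V / I = 10 / 5 = 2.0, so:
# assert ohms_law(voltage=10, current=5, resistance=0) == {"resistance": 2.0}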
if __name__ == "__main__":
import doctest
doctest.testmod()
| 221
| 0
|
'''simple docstring'''
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json"""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)
    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def _A ( ):
'''simple docstring'''
A__ = argparse.ArgumentParser(
epilog='Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate' )
parser.add_argument('--data_dir' ,type=_lowerCAmelCase ,help='like cnn_dm/test.source' )
parser.add_argument(
'--model_name' ,type=_lowerCAmelCase ,help='like facebook/bart-large-cnn,t5-base, etc.' ,default='sshleifer/distilbart-xsum-12-3' ,)
parser.add_argument('--save_dir' ,type=_lowerCAmelCase ,help='where to save' ,default='tmp_gen' )
parser.add_argument('--max_source_length' ,type=_lowerCAmelCase ,default=_lowerCAmelCase )
parser.add_argument(
'--type_path' ,type=_lowerCAmelCase ,default='test' ,help='which subset to evaluate, typically train/val/test' )
parser.add_argument('--task' ,type=_lowerCAmelCase ,default='summarization' ,help='used for task_specific_params + metrics' )
parser.add_argument('--bs' ,type=_lowerCAmelCase ,default=8 ,required=_lowerCAmelCase ,help='batch size' )
parser.add_argument(
'--local_rank' ,type=_lowerCAmelCase ,default=-1 ,required=_lowerCAmelCase ,help='should be passed by distributed.launch' )
parser.add_argument(
'--n_obs' ,type=_lowerCAmelCase ,default=_lowerCAmelCase ,required=_lowerCAmelCase ,help='How many observations. Defaults to all.' )
parser.add_argument(
'--num_return_sequences' ,type=_lowerCAmelCase ,default=1 ,required=_lowerCAmelCase ,help='How many sequences to return' )
parser.add_argument(
'--sync_timeout' ,type=_lowerCAmelCase ,default=600 ,required=_lowerCAmelCase ,help='How long should master process wait for other processes to finish.' ,)
parser.add_argument('--src_lang' ,type=_lowerCAmelCase ,default=_lowerCAmelCase ,required=_lowerCAmelCase )
parser.add_argument('--tgt_lang' ,type=_lowerCAmelCase ,default=_lowerCAmelCase ,required=_lowerCAmelCase )
parser.add_argument(
'--prefix' ,type=_lowerCAmelCase ,required=_lowerCAmelCase ,default=_lowerCAmelCase ,help='will be added to the beginning of src examples' )
parser.add_argument('--fp16' ,action='store_true' )
parser.add_argument('--debug' ,action='store_true' )
A__ = time.time()
A__ , A__ = parser.parse_known_args()
A__ = parse_numeric_n_bool_cl_kwargs(_lowerCAmelCase )
if generate_kwargs and args.local_rank <= 0:
print(F"""parsed the following generate kwargs: {generate_kwargs}""" )
A__ = Path(args.save_dir + '_tmp' )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) # this handles locking.
A__ = list(json_save_dir.glob('rank_*.json' ) )
if intermediate_files:
raise ValueError(F"""Found files at {json_save_dir} please move or remove them.""" )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
A__ = {}
if args.src_lang is not None:
A__ = args.src_lang
if args.tgt_lang is not None:
A__ = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=_lowerCAmelCase )
A__ , A__ = eval_data_dir(
args.data_dir ,_lowerCAmelCase ,args.model_name ,type_path=args.type_path ,bs=args.bs ,fpaa=args.fpaa ,task=args.task ,local_rank=args.local_rank ,n_obs=args.n_obs ,max_source_length=args.max_source_length ,num_return_sequences=args.num_return_sequences ,prefix=args.prefix ,dataset_kwargs=_lowerCAmelCase ,**_lowerCAmelCase ,)
if args.local_rank <= 0:
A__ = Path(args.save_dir )
save_dir.mkdir(exist_ok=_lowerCAmelCase )
A__ = gather_results_from_each_node(_lowerCAmelCase ,_lowerCAmelCase ,args.sync_timeout )
A__ = combine_partial_results(_lowerCAmelCase )
if args.num_return_sequences > 1:
A__ = save_dir.joinpath('pseudolabel_results.json' )
print(F"""Saving aggregated results at {save_path}, intermediate in {json_save_dir}/""" )
save_json(_lowerCAmelCase ,_lowerCAmelCase )
return
A__ = Path(args.data_dir ).joinpath(args.type_path + '.target' )
with open(_lowerCAmelCase ) as f:
A__ = [x.rstrip() for x in f.readlines()][: len(_lowerCAmelCase )]
# Calculate metrics, save metrics, and save _generations.txt
A__ = 'translation' in args.task
A__ = calculate_bleu if calc_bleu else calculate_rouge
A__ = 'bleu' if calc_bleu else 'rouge'
A__ = score_fn(_lowerCAmelCase ,_lowerCAmelCase )
A__ = len(_lowerCAmelCase )
A__ = time.time() - start_time
A__ = round(runtime / metrics['n_obs'] ,4 )
A__ = num_replicas
# TODO(@stas00): add whatever metadata to metrics
A__ = save_dir.joinpath(F"""{args.type_path}_{metric_name}.json""" )
save_json(_lowerCAmelCase ,_lowerCAmelCase ,indent=_lowerCAmelCase )
print(_lowerCAmelCase )
write_txt_file(_lowerCAmelCase ,save_dir.joinpath(F"""{args.type_path}_generations.txt""" ) )
if args.debug:
write_txt_file(_lowerCAmelCase ,save_dir.joinpath(F"""{args.type_path}.target""" ) )
else:
shutil.rmtree(_lowerCAmelCase )
def _A ( UpperCAmelCase ):
'''simple docstring'''
A__ = []
for partial_result in partial_results:
records.extend(_lowerCAmelCase )
A__ = sorted(_lowerCAmelCase ,key=lambda x : x["id"] )
A__ = [x['pred'] for x in records]
return preds
def _A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
A__ = time.time()
logger.info('waiting for all nodes to finish' )
A__ = None
while (time.time() - start_wait) < timeout:
A__ = list(save_dir.glob('rank_*.json' ) )
if len(_lowerCAmelCase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
A__ = lmap(_lowerCAmelCase ,_lowerCAmelCase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError('Rank 0 gave up on waiting for other processes' )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
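# (illustrative command, not verbatim from the original script -- the flags match
# the parser above):
# python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \
#     --model_name Helsinki-NLP/opus-mt-en-ro --data_dir wmt_en_ro \
#     --save_dir mt_gen --type_path test --bs 16 --fp16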
run_generate()
A : Any = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
A : Optional[int] = [{'type': 'code', 'content': INSTALL_CONTENT}]
A : Optional[int] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
class lowercase :
def __init__( self : Dict , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Any ):
SCREAMING_SNAKE_CASE__ : Any = name
SCREAMING_SNAKE_CASE__ : Dict = value
SCREAMING_SNAKE_CASE__ : Union[str, Any] = weight
def __repr__( self : Any ):
return f"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def lowercase__ ( self : Tuple ):
return self.value
def lowercase__ ( self : Optional[Any] ):
return self.name
def lowercase__ ( self : Optional[int] ):
return self.weight
def lowercase__ ( self : str ):
return self.value / self.weight
def a ( A__ , A__ , A__ ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = []
for i in range(len(_lowercase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def a ( A__ , A__ , A__ ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = sorted(_lowercase , key=_lowercase , reverse=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
total_value, total_cost = 0.0, 0.0
for i in range(len(_lowercase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def a ( ) -> List[str]:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
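# Intended usage of the two helpers above (descriptive names are assumed purely
# for illustration, since the obfuscation collapsed the function and method names):
#   menu = build_menu(["burger", "salad", "fries"], [80, 30, 40], [40, 10, 20])
#   chosen, total_value = greedy(menu, 50, Things.get_value)
#   # greedy sorts items by the key function, then keeps taking while the
#   # accumulated weight stays within max_cost.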
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
a_ :str = logging.get_logger(__name__)
a_ :List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
a_ :Union[str, Any] = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
a_ :Any = {
'allenai/led-base-16384': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def a ( ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
SCREAMING_SNAKE_CASE__ : str = bs[:]
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(A__ )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE__ : str = [chr(A__ ) for n in cs]
return dict(zip(A__ , A__ ) )
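# The table above keeps printable byte values as themselves and shifts everything
# else past 255: e.g. the space byte 0x20 is excluded from the printable ranges,
# so it maps to chr(256 + 32) == "Ġ" -- which is why GPT-2/LED-style tokenizers
# render a leading space as "Ġ" inside tokens.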
def a ( A__ ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = set()
SCREAMING_SNAKE_CASE__ : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = char
return pairs
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : int = VOCAB_FILES_NAMES
lowerCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : List[str] = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , _lowercase : Dict , _lowercase : Optional[int] , _lowercase : Any="replace" , _lowercase : List[Any]="<s>" , _lowercase : int="</s>" , _lowercase : Tuple="</s>" , _lowercase : Tuple="<s>" , _lowercase : Tuple="<unk>" , _lowercase : List[Any]="<pad>" , _lowercase : List[Any]="<mask>" , _lowercase : Optional[int]=False , **_lowercase : Optional[Any] , ):
SCREAMING_SNAKE_CASE__ : Tuple = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else bos_token
SCREAMING_SNAKE_CASE__ : Any = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else eos_token
SCREAMING_SNAKE_CASE__ : Any = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else sep_token
SCREAMING_SNAKE_CASE__ : List[Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else cls_token
SCREAMING_SNAKE_CASE__ : int = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else unk_token
SCREAMING_SNAKE_CASE__ : List[Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE__ : List[str] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
errors=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , add_prefix_space=_lowercase , **_lowercase , )
with open(_lowercase , encoding='''utf-8''' ) as vocab_handle:
SCREAMING_SNAKE_CASE__ : Tuple = json.load(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE__ : Tuple = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE__ : Dict = bytes_to_unicode()
SCREAMING_SNAKE_CASE__ : str = {v: k for k, v in self.byte_encoder.items()}
with open(_lowercase , encoding='''utf-8''' ) as merges_handle:
SCREAMING_SNAKE_CASE__ : Dict = merges_handle.read().split('''\n''' )[1:-1]
SCREAMING_SNAKE_CASE__ : Dict = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE__ : Optional[Any] = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE__ : Optional[int] = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowercase__ ( self : Optional[Any] ):
return len(self.encoder )
def lowercase__ ( self : Tuple ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase__ ( self : Tuple , _lowercase : List[Any] ):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE__ : Optional[int] = tuple(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = get_pairs(_lowercase )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE__ : Tuple = min(_lowercase , key=lambda _lowercase : self.bpe_ranks.get(_lowercase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = bigram
SCREAMING_SNAKE_CASE__ : List[str] = []
SCREAMING_SNAKE_CASE__ : Tuple = 0
while i < len(_lowercase ):
try:
SCREAMING_SNAKE_CASE__ : Optional[Any] = word.index(_lowercase , _lowercase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE__ : Dict = j
if word[i] == first and i < len(_lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE__ : List[Any] = tuple(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = new_word
if len(_lowercase ) == 1:
break
else:
SCREAMING_SNAKE_CASE__ : Any = get_pairs(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = ''' '''.join(_lowercase )
SCREAMING_SNAKE_CASE__ : int = word
return word
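# Tiny trace of the merge loop above: with bpe_ranks == {("l", "o"): 0, ("lo", "w"): 1},
# the token "low" first fuses the best-ranked pair ("l", "o") giving ("lo", "w"),
# then fuses ("lo", "w") giving ("low",); the space-joined result is cached per token.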
def lowercase__ ( self : Optional[Any] , _lowercase : List[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for token in re.findall(self.pat , _lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowercase ).split(''' ''' ) )
return bpe_tokens
def lowercase__ ( self : int , _lowercase : List[str] ):
return self.encoder.get(_lowercase , self.encoder.get(self.unk_token ) )
def lowercase__ ( self : int , _lowercase : int ):
return self.decoder.get(_lowercase )
def lowercase__ ( self : List[str] , _lowercase : Optional[int] ):
SCREAMING_SNAKE_CASE__ : int = ''''''.join(_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def lowercase__ ( self : List[Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE__ : int = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowercase , ensure_ascii=_lowercase ) + '''\n''' )
SCREAMING_SNAKE_CASE__ : str = 0
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
SCREAMING_SNAKE_CASE__ : str = token_index
writer.write(''' '''.join(_lowercase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def lowercase__ ( self : Any , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
if token_ids_a is None:
return [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
def lowercase__ ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase__ ( self : Dict , _lowercase : Dict , _lowercase : List[str]=False , **_lowercase : Optional[int] ):
SCREAMING_SNAKE_CASE__ : str = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowercase ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE__ : Any = ''' ''' + text
return (text, kwargs)
def lowercase__ ( self : int , _lowercase : Union[Dict[str, EncodedInput], BatchEncoding] , _lowercase : Optional[int] = None , _lowercase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , _lowercase : Optional[int] = None , _lowercase : Optional[bool] = None , ):
SCREAMING_SNAKE_CASE__ : Any = super()._pad(
encoded_inputs=_lowercase , max_length=_lowercase , padding_strategy=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE__ : str = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE__ : List[str] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(encoded_inputs['''global_attention_mask'''] ) != len(_lowercase )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE__ : Dict = len(_lowercase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE__ : Any = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE__ : int = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
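# Illustration of the padding rule above (hypothetical lengths): padding
# {"input_ids": [0, 9, 2], "global_attention_mask": [1, 0, 0]} to length 5 on the
# right yields global_attention_mask == [1, 0, 0, -1, -1]; padded slots get -1
# because 0 already means "local attention" for LED.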
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase( __a ):
'''simple docstring'''
lowercase__ = (DEISMultistepScheduler,)
lowercase__ = (("num_inference_steps", 25),)
def UpperCamelCase_ ( self: Optional[Any], **a_: int ):
'''simple docstring'''
_snake_case : List[Any] = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
}
config.update(**lowerCamelCase__ )
return config
def UpperCamelCase_ ( self: Any, a_: Optional[Any]=0, **a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : List[str] = dict(self.forward_default_kwargs )
_snake_case : str = kwargs.pop("""num_inference_steps""", lowerCamelCase__ )
_snake_case : Tuple = self.dummy_sample
_snake_case : Optional[int] = 0.1 * sample
_snake_case : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_snake_case : int = self.get_scheduler_config(**lowerCamelCase__ )
_snake_case : Union[str, Any] = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
_snake_case : Any = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
_snake_case : List[Any] = scheduler_class.from_pretrained(lowerCamelCase__ )
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
_snake_case : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
_snake_case , _snake_case : Any = sample, sample
for t in range(lowerCamelCase__, time_step + scheduler.config.solver_order + 1 ):
_snake_case : Optional[int] = scheduler.step(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__ ).prev_sample
_snake_case : Optional[int] = new_scheduler.step(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Dict, a_: str=0, **a_: Any ):
'''simple docstring'''
_snake_case : Optional[int] = dict(self.forward_default_kwargs )
_snake_case : Dict = kwargs.pop("""num_inference_steps""", lowerCamelCase__ )
_snake_case : Dict = self.dummy_sample
_snake_case : Union[str, Any] = 0.1 * sample
_snake_case : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_snake_case : List[str] = self.get_scheduler_config()
_snake_case : List[str] = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
_snake_case : Dict = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
_snake_case : List[str] = scheduler_class.from_pretrained(lowerCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
_snake_case : Tuple = dummy_past_residuals[: new_scheduler.config.solver_order]
_snake_case : Dict = scheduler.step(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__ ).prev_sample
_snake_case : Tuple = new_scheduler.step(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self: Union[str, Any], a_: Dict=None, **a_: Optional[int] ):
'''simple docstring'''
if scheduler is None:
_snake_case : List[Any] = self.scheduler_classes[0]
_snake_case : Dict = self.get_scheduler_config(**lowerCamelCase__ )
_snake_case : Union[str, Any] = scheduler_class(**lowerCamelCase__ )
_snake_case : List[Any] = self.scheduler_classes[0]
_snake_case : Any = self.get_scheduler_config(**lowerCamelCase__ )
_snake_case : Optional[int] = scheduler_class(**lowerCamelCase__ )
_snake_case : Any = 10
_snake_case : List[Any] = self.dummy_model()
_snake_case : int = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
_snake_case : str = model(lowerCamelCase__, lowerCamelCase__ )
_snake_case : Dict = scheduler.step(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ).prev_sample
return sample
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : List[str] = dict(self.forward_default_kwargs )
_snake_case : Union[str, Any] = kwargs.pop("""num_inference_steps""", lowerCamelCase__ )
for scheduler_class in self.scheduler_classes:
_snake_case : Any = self.get_scheduler_config()
_snake_case : Any = scheduler_class(**lowerCamelCase__ )
_snake_case : Dict = self.dummy_sample
_snake_case : int = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase__, """set_timesteps""" ):
scheduler.set_timesteps(lowerCamelCase__ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase__, """set_timesteps""" ):
_snake_case : Tuple = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_snake_case : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
_snake_case : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
_snake_case : int = scheduler.timesteps[5]
_snake_case : Union[str, Any] = scheduler.timesteps[6]
_snake_case : Optional[Any] = scheduler.step(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__ ).prev_sample
_snake_case : str = scheduler.step(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Optional[Any] = DEISMultistepScheduler(**self.get_scheduler_config() )
_snake_case : List[Any] = self.full_loop(scheduler=lowerCamelCase__ )
_snake_case : List[str] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.23_916 ) < 1E-3
_snake_case : Any = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_snake_case : Optional[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
_snake_case : int = UniPCMultistepScheduler.from_config(scheduler.config )
_snake_case : int = DEISMultistepScheduler.from_config(scheduler.config )
_snake_case : Optional[int] = self.full_loop(scheduler=lowerCamelCase__ )
_snake_case : Optional[Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.23_916 ) < 1E-3
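# The chain above swaps scheduler classes (DEIS -> DPMSolverSinglestep ->
# DPMSolverMultistep -> UniPC -> back to DEIS) via from_config and reruns the
# full loop, asserting the same output mean: the multistep schedulers are
# expected to accept one another's configs interchangeably.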
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
self.check_over_configs(thresholding=lowerCamelCase__ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCamelCase__, prediction_type=lowerCamelCase__, sample_max_value=lowerCamelCase__, algorithm_type="""deis""", solver_order=lowerCamelCase__, solver_type=lowerCamelCase__, )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCamelCase__, solver_type=lowerCamelCase__, prediction_type=lowerCamelCase__, algorithm_type=lowerCamelCase__, )
_snake_case : Dict = self.full_loop(
solver_order=lowerCamelCase__, solver_type=lowerCamelCase__, prediction_type=lowerCamelCase__, algorithm_type=lowerCamelCase__, )
assert not torch.isnan(lowerCamelCase__ ).any(), "Samples have nan numbers"
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
self.check_over_configs(lower_order_final=lowerCamelCase__ )
self.check_over_configs(lower_order_final=lowerCamelCase__ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=lowerCamelCase__, time_step=0 )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Tuple = self.full_loop()
_snake_case : Optional[int] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.23_916 ) < 1E-3
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.full_loop(prediction_type="""v_prediction""" )
_snake_case : List[Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.091 ) < 1E-3
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = self.scheduler_classes[0]
_snake_case : Any = self.get_scheduler_config(thresholding=lowerCamelCase__, dynamic_thresholding_ratio=0 )
_snake_case : str = scheduler_class(**lowerCamelCase__ )
_snake_case : Any = 10
_snake_case : str = self.dummy_model()
_snake_case : List[Any] = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
_snake_case : int = model(lowerCamelCase__, lowerCamelCase__ )
_snake_case : Tuple = scheduler.step(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ).prev_sample
assert sample.dtype == torch.floataa
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = 42
class __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
"""simple docstring"""
@register_to_config
def __init__( self , lowerCamelCase__ = 65_536 , lowerCamelCase__ = None , lowerCamelCase__ = 2 , lowerCamelCase__ = 2 , lowerCamelCase__ = 0 , lowerCamelCase__ = "fourier" , lowerCamelCase__ = True , lowerCamelCase__ = False , lowerCamelCase__ = 0.0 , lowerCamelCase__ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCamelCase__ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCamelCase__ = "UNetMidBlock1D" , lowerCamelCase__ = None , lowerCamelCase__ = (32, 32, 64) , lowerCamelCase__ = None , lowerCamelCase__ = 8 , lowerCamelCase__ = 1 , lowerCamelCase__ = False , ) -> Dict:
'''simple docstring'''
super().__init__()
__lowerCamelCase = sample_size
# time
if time_embedding_type == "fourier":
__lowerCamelCase = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCamelCase__ , log=lowerCamelCase__ , flip_sin_to_cos=lowerCamelCase__ )
__lowerCamelCase = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
__lowerCamelCase = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCamelCase__ , downscale_freq_shift=lowerCamelCase__ )
__lowerCamelCase = block_out_channels[0]
if use_timestep_embedding:
__lowerCamelCase = block_out_channels[0] * 4
__lowerCamelCase = TimestepEmbedding(
in_channels=lowerCamelCase__ , time_embed_dim=lowerCamelCase__ , act_fn=lowerCamelCase__ , out_dim=block_out_channels[0] , )
__lowerCamelCase = nn.ModuleList([] )
__lowerCamelCase = None
__lowerCamelCase = nn.ModuleList([] )
__lowerCamelCase = None
# down
__lowerCamelCase = in_channels
for i, down_block_type in enumerate(lowerCamelCase__ ):
__lowerCamelCase = output_channel
__lowerCamelCase = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
__lowerCamelCase = i == len(lowerCamelCase__ ) - 1
__lowerCamelCase = get_down_block(
lowerCamelCase__ , num_layers=lowerCamelCase__ , in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCamelCase__ )
# mid
__lowerCamelCase = get_mid_block(
lowerCamelCase__ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase__ , add_downsample=lowerCamelCase__ , )
# up
__lowerCamelCase = list(reversed(lowerCamelCase__ ) )
__lowerCamelCase = reversed_block_out_channels[0]
if out_block_type is None:
__lowerCamelCase = out_channels
else:
__lowerCamelCase = block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase__ ):
__lowerCamelCase = output_channel
__lowerCamelCase = (
reversed_block_out_channels[i + 1] if i < len(lowerCamelCase__ ) - 1 else final_upsample_channels
)
__lowerCamelCase = i == len(lowerCamelCase__ ) - 1
__lowerCamelCase = get_up_block(
lowerCamelCase__ , num_layers=lowerCamelCase__ , in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCamelCase__ )
__lowerCamelCase = output_channel
# out
__lowerCamelCase = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
__lowerCamelCase = get_out_block(
out_block_type=lowerCamelCase__ , num_groups_out=lowerCamelCase__ , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase__ , act_fn=lowerCamelCase__ , fc_dim=block_out_channels[-1] // 4 , )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = True , ) -> Union[UNetaDOutput, Tuple]:
'''simple docstring'''
__lowerCamelCase = timestep
if not torch.is_tensor(lowerCamelCase__ ):
__lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0:
__lowerCamelCase = timesteps[None].to(sample.device )
__lowerCamelCase = self.time_proj(lowerCamelCase__ )
if self.config.use_timestep_embedding:
__lowerCamelCase = self.time_mlp(lowerCamelCase__ )
else:
__lowerCamelCase = timestep_embed[..., None]
__lowerCamelCase = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
__lowerCamelCase = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
__lowerCamelCase = ()
for downsample_block in self.down_blocks:
__lowerCamelCase , __lowerCamelCase = downsample_block(hidden_states=lowerCamelCase__ , temb=lowerCamelCase__ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
__lowerCamelCase = self.mid_block(lowerCamelCase__ , lowerCamelCase__ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
__lowerCamelCase = down_block_res_samples[-1:]
__lowerCamelCase = down_block_res_samples[:-1]
__lowerCamelCase = upsample_block(lowerCamelCase__ , res_hidden_states_tuple=lowerCamelCase__ , temb=lowerCamelCase__ )
# 5. post-process
if self.out_block:
__lowerCamelCase = self.out_block(lowerCamelCase__ , lowerCamelCase__ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCamelCase__ )
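# Conceptual forward pass (keyword names were lost in the obfuscated signature;
# the layout mirrors diffusers' UNet1DModel, which is the assumed original):
#   unet = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2)
#   out = unet(torch.randn(1, 2, 65536), timestep=10).sample  # (batch, channels, length)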
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
_lowerCamelCase : Dict = logging.get_logger(__name__)
@add_end_docstrings(__snake_case )
class lowerCamelCase__ ( __snake_case ):
def __init__( self , **lowerCAmelCase__ ) -> List[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self , lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
"""simple docstring"""
return super().__call__(lowerCAmelCase__ , **lowerCAmelCase__ )
def _UpperCamelCase ( self , **lowerCAmelCase__ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase :str ={}
if "candidate_labels" in kwargs:
_UpperCamelCase :str =kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
_UpperCamelCase :Dict =kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def _UpperCamelCase ( self , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__="This is a sound of {}." ) -> Tuple:
"""simple docstring"""
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
if audio.startswith("""http://""" ) or audio.startswith("""https://""" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
_UpperCamelCase :List[Any] =requests.get(lowerCAmelCase__ ).content
else:
with open(lowerCAmelCase__ , """rb""" ) as f:
_UpperCamelCase :Optional[int] =f.read()
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase :Dict =ffmpeg_read(lowerCAmelCase__ , self.feature_extractor.sampling_rate )
if not isinstance(lowerCAmelCase__ , np.ndarray ):
raise ValueError("""We expect a numpy ndarray as input""" )
if len(audio.shape ) != 1:
raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" )
_UpperCamelCase :List[str] =self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" )
_UpperCamelCase :Optional[Any] =candidate_labels
_UpperCamelCase :Union[str, Any] =[hypothesis_template.format(lowerCAmelCase__ ) for x in candidate_labels]
_UpperCamelCase :Tuple =self.tokenizer(lowerCAmelCase__ , return_tensors=self.framework , padding=lowerCAmelCase__ )
_UpperCamelCase :Tuple =[text_inputs]
return inputs
def _UpperCamelCase ( self , lowerCAmelCase__ ) -> Dict:
"""simple docstring"""
_UpperCamelCase :Optional[int] =model_inputs.pop("""candidate_labels""" )
_UpperCamelCase :Any =model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , lowerCAmelCase__ ):
_UpperCamelCase :Union[str, Any] =text_inputs[0]
else:
# Batching case.
_UpperCamelCase :List[Any] =text_inputs[0][0]
_UpperCamelCase :Any =self.model(**lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase :Union[str, Any] ={
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_audio,
}
return model_outputs
def _UpperCamelCase ( self , lowerCAmelCase__ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase :Optional[int] =model_outputs.pop("""candidate_labels""" )
_UpperCamelCase :Tuple =model_outputs["""logits"""][0]
if self.framework == "pt":
_UpperCamelCase :Optional[int] =logits.softmax(dim=0 )
_UpperCamelCase :int =probs.tolist()
else:
raise ValueError("""`tf` framework not supported.""" )
_UpperCamelCase :int =[
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(lowerCAmelCase__ , lowerCAmelCase__ ) , key=lambda x : -x[0] )
]
return result
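# End-to-end sketch (the checkpoint id is an assumption; CLAP models are the
# usual backbone for this pipeline):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier("dog_bark.wav", candidate_labels=["dog barking", "vacuum cleaner"])
#   # -> [{"score": ..., "label": "dog barking"}, {"score": ..., "label": "vacuum cleaner"}]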
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class lowerCamelCase__ ( unittest.TestCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=30 , lowerCAmelCase__=400 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=0.9 , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=[0.5, 0.5, 0.5] , lowerCAmelCase__=[0.5, 0.5, 0.5] , ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase :List[str] =size if size is not None else {"""shortest_edge""": 30}
_UpperCamelCase :str =crop_size if crop_size is not None else {"""height""": 30, """width""": 30}
_UpperCamelCase :Tuple =parent
_UpperCamelCase :Optional[int] =batch_size
_UpperCamelCase :Tuple =num_channels
_UpperCamelCase :int =min_resolution
_UpperCamelCase :Union[str, Any] =max_resolution
_UpperCamelCase :Tuple =do_resize_and_center_crop
_UpperCamelCase :Union[str, Any] =size
_UpperCamelCase :Union[str, Any] =crop_pct
_UpperCamelCase :Tuple =crop_size
_UpperCamelCase :List[str] =do_normalize
_UpperCamelCase :Any =image_mean
_UpperCamelCase :Optional[Any] =image_std
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowerCamelCase__ ( __snake_case , unittest.TestCase ):
__UpperCAmelCase = PoolFormerImageProcessor if is_vision_available() else None
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
_UpperCamelCase :Dict =PoolFormerImageProcessingTester(self )
@property
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase :Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_resize_and_center_crop""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """size""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """crop_pct""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_std""" ) )
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
_UpperCamelCase :Dict =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 30} )
self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} )
_UpperCamelCase :Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
pass
def _UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase :Any =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCamelCase :List[str] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
_UpperCamelCase :int =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCamelCase :Optional[Any] =image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
_UpperCamelCase :Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCamelCase :int =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
_UpperCamelCase :List[Any] =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCamelCase :Tuple =image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase :Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCamelCase :Optional[int] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
_UpperCamelCase :Dict =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCamelCase :Tuple =image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
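# Quick sanity check outside the test harness (the checkpoint id is an assumption):
#   from transformers import PoolFormerImageProcessor
#   ip = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")
#   pixel_values = ip(images=Image.open("cat.png"), return_tensors="pt").pixel_values
#   # shape: (1, 3, crop_size["height"], crop_size["width"])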
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self: Dict ) -> Optional[Any]:
__magic_name__ : Tuple = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
__magic_name__ : Any = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
__magic_name__ : str = {
"unk_token": "<unk>",
"bos_token": "<s>",
"eos_token": "</s>",
}
__magic_name__ : int = {
"feature_size": 1,
"padding_value": 0.0,
"sampling_rate": 1_6000,
"return_attention_mask": False,
"do_normalize": True,
}
__magic_name__ : Tuple = tempfile.mkdtemp()
__magic_name__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__magic_name__ : List[Any] = os.path.join(self.tmpdirname , __UpperCamelCase )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__UpperCamelCase ) + "\n" )
with open(self.feature_extraction_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__UpperCamelCase ) + "\n" )
# load decoder from hub
__magic_name__ : List[str] = "hf-internal-testing/ngram-beam-search-decoder"
def lowerCAmelCase__ ( self: Dict , **__UpperCamelCase: Any ) -> Union[str, Any]:
__magic_name__ : List[Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(__UpperCamelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def lowerCAmelCase__ ( self: int , **__UpperCamelCase: Tuple ) -> str:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def lowerCAmelCase__ ( self: int , **__UpperCamelCase: Union[str, Any] ) -> Tuple:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__UpperCamelCase )
def lowerCAmelCase__ ( self: List[str] ) -> Tuple:
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self: Optional[Any] ) -> Tuple:
__magic_name__ : Union[str, Any] = self.get_tokenizer()
__magic_name__ : Tuple = self.get_feature_extractor()
__magic_name__ : Tuple = self.get_decoder()
__magic_name__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase , decoder=__UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
__magic_name__ : List[str] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCamelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __UpperCamelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __UpperCamelCase )
def lowerCAmelCase__ ( self: Any ) -> Union[str, Any]:
__magic_name__ : Dict = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__magic_name__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCAmelCase__ ( self: List[Any] ) -> Tuple:
__magic_name__ : str = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(__UpperCamelCase , "include" ):
WavaVecaProcessorWithLM(
tokenizer=__UpperCamelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCAmelCase__ ( self: str ) -> int:
__magic_name__ : str = self.get_feature_extractor()
__magic_name__ : str = self.get_tokenizer()
__magic_name__ : Tuple = self.get_decoder()
__magic_name__ : str = WavaVecaProcessorWithLM(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase , decoder=__UpperCamelCase )
__magic_name__ : Optional[Any] = floats_list((3, 1000) )
__magic_name__ : List[Any] = feature_extractor(__UpperCamelCase , return_tensors="np" )
__magic_name__ : List[str] = processor(__UpperCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase__ ( self: str ) -> Dict:
__magic_name__ : Union[str, Any] = self.get_feature_extractor()
__magic_name__ : List[str] = self.get_tokenizer()
__magic_name__ : Dict = self.get_decoder()
__magic_name__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase , decoder=__UpperCamelCase )
__magic_name__ : Any = "This is a test string"
__magic_name__ : int = processor(text=__UpperCamelCase )
__magic_name__ : int = tokenizer(__UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self: List[str] , __UpperCamelCase: Dict=(2, 10, 16) , __UpperCamelCase: Optional[Any]=77 ) -> Any:
np.random.seed(__UpperCamelCase )
return np.random.rand(*__UpperCamelCase )
def lowerCAmelCase__ ( self: Any ) -> Any:
__magic_name__ : Tuple = self.get_feature_extractor()
__magic_name__ : List[Any] = self.get_tokenizer()
__magic_name__ : Optional[Any] = self.get_decoder()
__magic_name__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase , decoder=__UpperCamelCase )
__magic_name__ : Optional[int] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__magic_name__ : str = processor.decode(__UpperCamelCase )
__magic_name__ : List[str] = decoder.decode_beams(__UpperCamelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("</s> <s> </s>" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
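# Note: processor.decode wraps pyctcdecode's best beam, whose tuple layout is
# (text, ..., logit_score, lm_score) -- hence the [0], [-2] and [-1] indexing above.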
@parameterized.expand([[None], ["fork"], ["spawn"]] )
def lowerCAmelCase__ ( self: str , __UpperCamelCase: Union[str, Any] ) -> int:
__magic_name__ : List[Any] = self.get_feature_extractor()
__magic_name__ : List[str] = self.get_tokenizer()
__magic_name__ : Tuple = self.get_decoder()
__magic_name__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase , decoder=__UpperCamelCase )
__magic_name__ : Tuple = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__magic_name__ : Optional[Any] = processor.batch_decode(__UpperCamelCase )
else:
with get_context(__UpperCamelCase ).Pool() as pool:
__magic_name__ : Tuple = processor.batch_decode(__UpperCamelCase , __UpperCamelCase )
__magic_name__ : List[Any] = list(__UpperCamelCase )
with get_context("fork" ).Pool() as p:
__magic_name__ : List[str] = decoder.decode_beams_batch(__UpperCamelCase , __UpperCamelCase )
__magic_name__ , __magic_name__ , __magic_name__ : List[Any] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__UpperCamelCase , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(__UpperCamelCase , decoded_processor.logit_score )
self.assertListEqual(__UpperCamelCase , decoded_processor.lm_score )
def lowerCAmelCase__ ( self: Optional[Any] ) -> str:
__magic_name__ : int = self.get_feature_extractor()
__magic_name__ : Any = self.get_tokenizer()
__magic_name__ : Union[str, Any] = self.get_decoder()
__magic_name__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase , decoder=__UpperCamelCase )
__magic_name__ : List[Any] = self._get_dummy_logits()
__magic_name__ : Union[str, Any] = 15
__magic_name__ : Any = -2_0.0
__magic_name__ : List[str] = -4.0
__magic_name__ : Tuple = processor.batch_decode(
__UpperCamelCase , beam_width=__UpperCamelCase , beam_prune_logp=__UpperCamelCase , token_min_logp=__UpperCamelCase , )
__magic_name__ : List[str] = decoded_processor_out.text
__magic_name__ : int = list(__UpperCamelCase )
with get_context("fork" ).Pool() as pool:
__magic_name__ : Union[str, Any] = decoder.decode_beams_batch(
__UpperCamelCase , __UpperCamelCase , beam_width=__UpperCamelCase , beam_prune_logp=__UpperCamelCase , token_min_logp=__UpperCamelCase , )
__magic_name__ : Union[str, Any] = [d[0][0] for d in decoded_decoder_out]
__magic_name__ : str = [d[0][2] for d in decoded_decoder_out]
__magic_name__ : Union[str, Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , __UpperCamelCase )
self.assertTrue(np.array_equal(__UpperCamelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , __UpperCamelCase , atol=1E-3 ) )
self.assertTrue(np.array_equal(__UpperCamelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , __UpperCamelCase , atol=1E-3 ) )
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[int]:
__magic_name__ : int = self.get_feature_extractor()
__magic_name__ : Dict = self.get_tokenizer()
__magic_name__ : Union[str, Any] = self.get_decoder()
__magic_name__ : List[str] = WavaVecaProcessorWithLM(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase , decoder=__UpperCamelCase )
__magic_name__ : Any = self._get_dummy_logits()
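        # KenLM shallow-fusion parameters: alpha weights the language-model score, beta is
        # a word-insertion bonus, unk_score_offset penalizes unknown tokens, and
        # lm_score_boundary controls whether the LM respects word boundaries when scoring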
__magic_name__ : Optional[int] = 2.0
__magic_name__ : str = 5.0
__magic_name__ : List[str] = -2_0.0
__magic_name__ : Any = True
__magic_name__ : List[Any] = processor.batch_decode(
__UpperCamelCase , alpha=__UpperCamelCase , beta=__UpperCamelCase , unk_score_offset=__UpperCamelCase , lm_score_boundary=__UpperCamelCase , )
__magic_name__ : List[str] = decoded_processor_out.text
__magic_name__ : Dict = list(__UpperCamelCase )
decoder.reset_params(
alpha=__UpperCamelCase , beta=__UpperCamelCase , unk_score_offset=__UpperCamelCase , lm_score_boundary=__UpperCamelCase , )
with get_context("fork" ).Pool() as pool:
            __magic_name__ : Optional[int] = decoder.decode_beams_batch(
                pool , __UpperCamelCase , )
__magic_name__ : Any = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , __UpperCamelCase )
__magic_name__ : Dict = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , __UpperCamelCase )
def lowerCAmelCase__ ( self: Optional[Any] ) -> Any:
__magic_name__ : int = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
__magic_name__ : Tuple = processor.decoder.model_container[processor.decoder._model_key]
__magic_name__ : int = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
__magic_name__ : Optional[Any] = os.listdir(__UpperCamelCase )
__magic_name__ : str = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Dict:
__magic_name__ : Dict = snapshot_download("hf-internal-testing/processor_with_lm" )
__magic_name__ : List[str] = WavaVecaProcessorWithLM.from_pretrained(__UpperCamelCase )
__magic_name__ : List[Any] = processor.decoder.model_container[processor.decoder._model_key]
__magic_name__ : List[str] = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
__magic_name__ : List[str] = os.listdir(__UpperCamelCase )
__magic_name__ : List[str] = os.listdir(__UpperCamelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> Union[str, Any]:
__magic_name__ : int = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
__magic_name__ : Union[str, Any] = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
__magic_name__ : Union[str, Any] = floats_list((3, 1000) )
__magic_name__ : int = processor_wavaveca(__UpperCamelCase , return_tensors="np" )
__magic_name__ : str = processor_auto(__UpperCamelCase , return_tensors="np" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__magic_name__ : Tuple = self._get_dummy_logits()
__magic_name__ : List[str] = processor_wavaveca.batch_decode(__UpperCamelCase )
__magic_name__ : str = processor_auto.batch_decode(__UpperCamelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCAmelCase__ ( self: str ) -> str:
__magic_name__ : Any = self.get_feature_extractor()
__magic_name__ : Tuple = self.get_tokenizer()
__magic_name__ : Optional[int] = self.get_decoder()
__magic_name__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase , decoder=__UpperCamelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
@staticmethod
    def get_from_offsets( offsets , key ):
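        # gather the value stored under `key` from every word-offset dictionary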
__magic_name__ : Union[str, Any] = [d[key] for d in offsets]
return retrieved_list
def lowerCAmelCase__ ( self: Optional[Any] ) -> Tuple:
__magic_name__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
__magic_name__ : Optional[Any] = self._get_dummy_logits()[0]
__magic_name__ : Union[str, Any] = processor.decode(__UpperCamelCase , output_word_offsets=__UpperCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] )
def lowerCAmelCase__ ( self: List[Any] ) -> Union[str, Any]:
__magic_name__ : List[str] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
__magic_name__ : List[Any] = self._get_dummy_logits()
__magic_name__ : Tuple = processor.batch_decode(__UpperCamelCase , output_word_offsets=__UpperCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(__UpperCamelCase , __UpperCamelCase ) )
self.assertListEqual(
[" ".join(self.get_from_offsets(__UpperCamelCase , "word" ) ) for o in outputs["word_offsets"]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCAmelCase__ ( self: List[str] ) -> str:
import torch
__magic_name__ : List[Any] = load_dataset("common_voice" , "en" , split="train" , streaming=__UpperCamelCase )
__magic_name__ : Tuple = ds.cast_column("audio" , datasets.Audio(sampling_rate=1_6000 ) )
__magic_name__ : List[Any] = iter(__UpperCamelCase )
__magic_name__ : Dict = next(__UpperCamelCase )
__magic_name__ : List[Any] = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
__magic_name__ : str = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__magic_name__ : List[str] = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values
with torch.no_grad():
__magic_name__ : Optional[Any] = model(__UpperCamelCase ).logits.cpu().numpy()
__magic_name__ : Optional[Any] = processor.decode(logits[0] , output_word_offsets=__UpperCamelCase )
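        # word offsets are indices into the model's output frames; multiplying by
        # inputs_to_logits_ratio / sampling_rate converts a frame index into seconds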
__magic_name__ : List[str] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__magic_name__ : Optional[int] = [
{
"start_time": d["start_offset"] * time_offset,
"end_time": d["end_offset"] * time_offset,
"word": d["word"],
}
for d in output["word_offsets"]
]
__magic_name__ : Union[str, Any] = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
# output words
self.assertEqual(" ".join(self.get_from_offsets(__UpperCamelCase , "word" ) ) , __UpperCamelCase )
self.assertEqual(" ".join(self.get_from_offsets(__UpperCamelCase , "word" ) ) , output.text )
# output times
__magic_name__ : List[Any] = torch.tensor(self.get_from_offsets(__UpperCamelCase , "start_time" ) )
__magic_name__ : List[Any] = torch.tensor(self.get_from_offsets(__UpperCamelCase , "end_time" ) )
# fmt: off
__magic_name__ : Any = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
__magic_name__ : Optional[Any] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=0.0_1 ) )
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=0.0_1 ) )
| 436
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig( PretrainedConfig ):
'''simple docstring'''
    model_type = "encodec"
    def __init__( self , target_bandwidths=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , sampling_rate=2_4000 , audio_channels=1 , normalize=False , chunk_length_s=None , overlap=None , hidden_size=128 , num_filters=32 , num_residual_layers=1 , upsampling_ratios=[8, 5, 4, 2] , norm_type="weight_norm" , kernel_size=7 , last_kernel_size=7 , residual_kernel_size=3 , dilation_growth_rate=2 , use_causal_conv=True , pad_mode="reflect" , compress=2 , num_lstm_layers=2 , trim_right_ratio=1.0 , codebook_size=1024 , codebook_dim=None , use_conv_shortcut=True , **kwargs , ) -> List[Any]:
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
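        # EnCodec's convolutional blocks support only weight normalization or
        # time-wise group normalization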
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
        super().__init__(**kwargs )
@property
def lowerCAmelCase__ ( self: Any ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def lowerCAmelCase__ ( self: List[str] ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def lowerCAmelCase__ ( self: Any ) -> int:
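        # hop_length (the product of the upsampling ratios) is the number of audio
        # samples per latent frame, so this is the number of frames per second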
        hop_length = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def lowerCAmelCase__ ( self: List[Any] ) -> int:
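        # each residual quantizer contributes log2(codebook_size) = 10 bits per frame, so
        # this is the number of codebooks needed to reach the largest target bandwidth (kbps)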
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 436
| 1
|
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester :
'''simple docstring'''
    def __init__( self ,parent ,batch_size=2 ,num_channels=3 ,image_size=4 ,patch_size=2 ,text_seq_length=7 ,is_training=True ,use_input_mask=True ,use_token_type_ids=True ,use_labels=True ,vocab_size=99 ,hidden_size=36 ,num_hidden_layers=2 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.02 ,coordinate_size=6 ,shape_size=6 ,num_labels=3 ,num_choices=4 ,scope=None ,range_bbox=1000 ,):
'''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs( self ):
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] ,self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] ,self.range_bbox )
        bbox = bbox.numpy()
# Ensure that bbox is legal
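        # boxes are (x0, y0, x1, y1); swap any inverted pairs so x0 <= x1 and y0 <= y1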
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox )
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] ,self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] ,self.num_labels )
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,coordinate_size=self.coordinate_size ,shape_size=self.shape_size ,input_size=self.image_size ,patch_size=self.patch_size ,)
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Optional[int] = TFLayoutLMvaModel(config=SCREAMING_SNAKE_CASE_ )
# text + image
snake_case : Optional[Any] = model(SCREAMING_SNAKE_CASE_ ,pixel_values=SCREAMING_SNAKE_CASE_ ,training=SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = model(
SCREAMING_SNAKE_CASE_ ,bbox=SCREAMING_SNAKE_CASE_ ,pixel_values=SCREAMING_SNAKE_CASE_ ,attention_mask=SCREAMING_SNAKE_CASE_ ,token_type_ids=SCREAMING_SNAKE_CASE_ ,training=SCREAMING_SNAKE_CASE_ ,)
snake_case : Optional[Any] = model(SCREAMING_SNAKE_CASE_ ,bbox=SCREAMING_SNAKE_CASE_ ,pixel_values=SCREAMING_SNAKE_CASE_ ,training=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
# text only
snake_case : Tuple = model(SCREAMING_SNAKE_CASE_ ,training=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
snake_case : int = model({"""pixel_values""": pixel_values} ,training=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.image_seq_length, self.hidden_size) )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : str = self.num_labels
snake_case : List[Any] = TFLayoutLMvaForSequenceClassification(config=SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = model(
SCREAMING_SNAKE_CASE_ ,bbox=SCREAMING_SNAKE_CASE_ ,pixel_values=SCREAMING_SNAKE_CASE_ ,attention_mask=SCREAMING_SNAKE_CASE_ ,token_type_ids=SCREAMING_SNAKE_CASE_ ,labels=SCREAMING_SNAKE_CASE_ ,training=SCREAMING_SNAKE_CASE_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Optional[int] = self.num_labels
snake_case : Union[str, Any] = TFLayoutLMvaForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = model(
SCREAMING_SNAKE_CASE_ ,bbox=SCREAMING_SNAKE_CASE_ ,pixel_values=SCREAMING_SNAKE_CASE_ ,attention_mask=SCREAMING_SNAKE_CASE_ ,token_type_ids=SCREAMING_SNAKE_CASE_ ,labels=SCREAMING_SNAKE_CASE_ ,training=SCREAMING_SNAKE_CASE_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.text_seq_length, self.num_labels) )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : List[str] = 2
snake_case : Optional[int] = TFLayoutLMvaForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
snake_case : int = model(
SCREAMING_SNAKE_CASE_ ,bbox=SCREAMING_SNAKE_CASE_ ,pixel_values=SCREAMING_SNAKE_CASE_ ,attention_mask=SCREAMING_SNAKE_CASE_ ,token_type_ids=SCREAMING_SNAKE_CASE_ ,start_positions=SCREAMING_SNAKE_CASE_ ,end_positions=SCREAMING_SNAKE_CASE_ ,training=SCREAMING_SNAKE_CASE_ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """bbox""": bbox,
            """pixel_values""": pixel_values,
            """token_type_ids""": token_type_ids,
            """attention_mask""": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return True
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=False ):
'''simple docstring'''
snake_case : List[str] = copy.deepcopy(SCREAMING_SNAKE_CASE_ )
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
snake_case : List[str] = {
k: tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ ,1 ) ,(1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(SCREAMING_SNAKE_CASE_ ,tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
snake_case : Tuple = tf.ones(self.model_tester.batch_size ,dtype=tf.intaa )
elif model_class in get_values(SCREAMING_SNAKE_CASE_ ):
snake_case : Optional[Any] = tf.zeros(self.model_tester.batch_size ,dtype=tf.intaa )
snake_case : Union[str, Any] = tf.zeros(self.model_tester.batch_size ,dtype=tf.intaa )
elif model_class in get_values(SCREAMING_SNAKE_CASE_ ):
snake_case : Dict = tf.zeros(self.model_tester.batch_size ,dtype=tf.intaa )
elif model_class in get_values(SCREAMING_SNAKE_CASE_ ):
snake_case : int = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) ,dtype=tf.intaa )
return inputs_dict
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFLayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=LayoutLMvaConfig ,hidden_size=37 )
def snake_case_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case_ ( self ):
'''simple docstring'''
snake_case , snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : Tuple = model_class(SCREAMING_SNAKE_CASE_ )
if getattr(SCREAMING_SNAKE_CASE_ ,"""hf_compute_loss""" ,SCREAMING_SNAKE_CASE_ ):
# The number of elements in the loss should be the same as the number of elements in the label
snake_case : Dict = self._prepare_for_class(inputs_dict.copy() ,SCREAMING_SNAKE_CASE_ ,return_labels=SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() ,reverse=SCREAMING_SNAKE_CASE_ )[0]
]
snake_case : int = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
snake_case : Optional[Any] = self._prepare_for_class(inputs_dict.copy() ,SCREAMING_SNAKE_CASE_ ,return_labels=SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = prepared_for_class.pop("""input_ids""" )
snake_case : List[str] = model(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
snake_case : List[str] = self._prepare_for_class(inputs_dict.copy() ,SCREAMING_SNAKE_CASE_ ,return_labels=SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
snake_case : Dict = prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
snake_case : Dict = -100
snake_case : int = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
snake_case : Dict = model(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
snake_case : List[Any] = self._prepare_for_class(inputs_dict.copy() ,SCREAMING_SNAKE_CASE_ ,return_labels=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[Any] = model(SCREAMING_SNAKE_CASE_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
snake_case : List[Any] = self._prepare_for_class(inputs_dict.copy() ,SCREAMING_SNAKE_CASE_ ,return_labels=SCREAMING_SNAKE_CASE_ )
# Get keys that were added with the _prepare_for_class function
snake_case : int = prepared_for_class.keys() - inputs_dict.keys()
snake_case : List[str] = inspect.signature(model.call ).parameters
snake_case : Optional[Any] = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
snake_case : Optional[Any] = {0: """input_ids"""}
for label_key in label_keys:
snake_case : List[str] = signature_names.index(SCREAMING_SNAKE_CASE_ )
snake_case : int = label_key
snake_case : Union[str, Any] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
snake_case : Any = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
snake_case : Dict = prepared_for_class[value]
snake_case : Union[str, Any] = tuple(SCREAMING_SNAKE_CASE_ )
# Send to model
snake_case : str = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def snake_case_ ( self ):
'''simple docstring'''
        ((snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case)) : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
        ((snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case)) : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case : Tuple = type
self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
        ((snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case)) : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
        ((snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case)) : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
        ((snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case)) : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
@slow
def snake_case_ ( self ):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : Any = TFLayoutLMvaModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def prepare_img( ) -> Optional[Any]:
'''simple docstring'''
snake_case : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
class TFLayoutLMvaModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case_ ( self ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=SCREAMING_SNAKE_CASE_ ) if is_vision_available() else None
@slow
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[Any] = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
snake_case : Tuple = self.default_image_processor
snake_case : List[str] = prepare_img()
snake_case : Tuple = image_processor(images=SCREAMING_SNAKE_CASE_ ,return_tensors="""tf""" ).pixel_values
snake_case : Dict = tf.constant([[1, 2]] )
snake_case : str = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) ,axis=0 )
# forward pass
snake_case : Any = model(input_ids=SCREAMING_SNAKE_CASE_ ,bbox=SCREAMING_SNAKE_CASE_ ,pixel_values=SCREAMING_SNAKE_CASE_ ,training=SCREAMING_SNAKE_CASE_ )
# verify the logits
snake_case : Dict = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape ,SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = tf.constant(
[[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] ,SCREAMING_SNAKE_CASE_ ,atol=1E-4 ) )
| 315
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : List[Any] = logging.get_logger(__name__)
__lowercase : Optional[Any] = {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig( PretrainedConfig ):
'''simple docstring'''
    model_type = '''realm'''
    def __init__( self ,vocab_size=30522 ,hidden_size=768 ,retriever_proj_size=128 ,num_hidden_layers=12 ,num_attention_heads=12 ,num_candidates=8 ,intermediate_size=3072 ,hidden_act="gelu_new" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1E-12 ,span_hidden_size=256 ,max_span_width=10 ,reader_layer_norm_eps=1E-3 ,reader_beam_size=5 ,reader_seq_len=320 ,num_block_records=13353718 ,searcher_beam_size=5000 ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,**kwargs ,):
'''simple docstring'''
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
# Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
# Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
# Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 315
| 1
|
"""simple docstring"""
def sum_of_digits( n : int ) -> int:
    n = abs(n )
    res = 0
    while n > 0:
        res += n % 1_0
        n //= 1_0
    return res
def sum_of_digits_recursion( n : int ) -> int:
    n = abs(n )
    return n if n < 1_0 else n % 1_0 + sum_of_digits(n // 1_0 )
def sum_of_digits_compact( n : int ) -> int:
    return sum(int(c ) for c in str(abs(n ) ) )
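# all three variants return the digit sum of |n|; e.g. each yields 6 for n = -123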
def benchmark() -> None:
from collections.abc import Callable
from timeit import timeit
    def benchmark_a_function(func : Callable , value : int ) -> None:
        call = F'''{func.__name__}({value})'''
        timing = timeit(F'''__main__.{call}''' , setup="""import __main__""" )
        print(F'''{call:56} = {func(value )} -- {timing:.4f} seconds''' )
for value in (2_6_2_1_4_4, 1_1_2_5_8_9_9_9_0_6_8_4_2_6_2_4, 1_2_6_7_6_5_0_6_0_0_2_2_8_2_2_9_4_0_1_4_9_6_7_0_3_2_0_5_3_7_6):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 581
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPixaPixPipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
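        # in_channels=8 because InstructPix2Pix concatenates the conditioning image
        # latents (4 channels) with the noise latents (4 channels)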
__lowercase : Any = PNDMScheduler(skip_prk_steps=__a )
torch.manual_seed(0 )
__lowercase : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__lowercase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__lowercase : Optional[Any] = CLIPTextModel(__a )
__lowercase : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowercase : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase ( self : int , __a : Union[str, Any] , __a : int=0 ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
__lowercase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowercase : Union[str, Any] = Image.fromarray(np.uinta(__a ) ).convert("""RGB""" )
if str(__a ).startswith("""mps""" ):
__lowercase : List[Any] = torch.manual_seed(__a )
else:
__lowercase : List[Any] = torch.Generator(device=__a ).manual_seed(__a )
__lowercase : List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowercase : Tuple = self.get_dummy_components()
__lowercase : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**__a )
__lowercase : Any = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
__lowercase : Union[str, Any] = self.get_dummy_inputs(__a )
__lowercase : Optional[Any] = sd_pipe(**__a ).images
__lowercase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowercase : Any = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowercase : int = self.get_dummy_components()
__lowercase : int = StableDiffusionInstructPixaPixPipeline(**__a )
__lowercase : Tuple = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
__lowercase : int = self.get_dummy_inputs(__a )
__lowercase : List[Any] = """french fries"""
__lowercase : Dict = sd_pipe(**__a , negative_prompt=__a )
__lowercase : Union[str, Any] = output.images
__lowercase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowercase : str = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowercase : Optional[int] = self.get_dummy_components()
__lowercase : str = StableDiffusionInstructPixaPixPipeline(**__a )
__lowercase : int = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
__lowercase : List[str] = self.get_dummy_inputs(__a )
__lowercase : Union[str, Any] = [inputs["""prompt"""]] * 2
__lowercase : Optional[int] = np.array(inputs["""image"""] ).astype(np.floataa ) / 255.0
__lowercase : List[str] = torch.from_numpy(__a ).unsqueeze(0 ).to(__a )
__lowercase : str = image / 2 + 0.5
__lowercase : Any = image.permute(0 , 3 , 1 , 2 )
__lowercase : Optional[int] = image.repeat(2 , 1 , 1 , 1 )
__lowercase : Tuple = sd_pipe(**__a ).images
__lowercase : Union[str, Any] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
__lowercase : Optional[int] = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowercase : Optional[int] = self.get_dummy_components()
__lowercase : Dict = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
__lowercase : List[Any] = StableDiffusionInstructPixaPixPipeline(**__a )
__lowercase : Tuple = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
__lowercase : Tuple = self.get_dummy_inputs(__a )
__lowercase : int = sd_pipe(**__a ).images
__lowercase : List[str] = image[0, -3:, -3:, -1]
        slice = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(""",""".join([str(x ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
__lowercase : List[str] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
__lowercase : Any = self.get_dummy_components()
__lowercase : Dict = StableDiffusionInstructPixaPixPipeline(**__a )
__lowercase : Any = VaeImageProcessor(do_resize=__a , do_normalize=__a )
__lowercase : List[str] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__lowercase : Union[str, Any] = pipe(**self.get_dummy_inputs_by_type(__a , input_image_type="""pt""" ) )[0]
__lowercase : Tuple = components["""vae"""]
__lowercase : Tuple = self.get_dummy_inputs_by_type(__a , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__lowercase : Tuple = vae.encode(inputs[image_param] ).latent_dist.mode()
__lowercase : Optional[Any] = pipe(**__a )[0]
__lowercase : Optional[int] = np.abs(out - out_latents_inputs ).max()
self.assertLess(__a , 1E-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class StableDiffusionInstructPixaPixPipelineSlowTests( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Optional[Any] , __a : int=0 ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = torch.manual_seed(__a )
__lowercase : Any = load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
__lowercase : Optional[int] = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__lowercase : Tuple = self.get_inputs()
__lowercase : Union[str, Any] = pipe(**__a ).images
__lowercase : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__lowercase : Dict = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__a )
__lowercase : List[str] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__lowercase : Tuple = self.get_inputs()
__lowercase : Any = pipe(**__a ).images
__lowercase : List[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__lowercase : Tuple = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__a )
__lowercase : Any = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__lowercase : int = self.get_inputs()
__lowercase : Dict = pipe(**__a ).images
__lowercase : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__lowercase : Tuple = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase : Tuple = 0
def callback_fn(__a : int , __a : int , __a : torch.FloatTensor ) -> None:
__lowercase : Optional[int] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__lowercase : str = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__lowercase : List[str] = latents[0, -3:, -3:, -1]
__lowercase : str = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
__lowercase : Tuple = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__lowercase : List[str] = latents[0, -3:, -3:, -1]
__lowercase : Optional[int] = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
__lowercase : Any = False
__lowercase : Optional[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__a , torch_dtype=torch.floataa )
__lowercase : Optional[int] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__lowercase : Optional[int] = self.get_inputs()
pipe(**__a , callback=__a , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowercase : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__a , torch_dtype=torch.floataa )
__lowercase : List[str] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowercase : Any = self.get_inputs()
__lowercase : int = pipe(**__a )
__lowercase : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
__lowercase : List[str] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__lowercase : Tuple = inputs["""image"""].resize((504, 504) )
__lowercase : Optional[Any] = """timbrooks/instruct-pix2pix"""
__lowercase : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__a , safety_checker=__a , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__lowercase : int = pipe(**__a )
__lowercase : Tuple = output.images[0]
__lowercase : List[str] = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
__lowercase : Optional[int] = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 149
| 0
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset( IterableDataset ):
    '''simple docstring'''
    def __init__( self : List[str] , data ) -> None:
        '''simple docstring'''
        self.data = data
def __iter__( self : List[Any] ) -> int:
'''simple docstring'''
for element in self.data:
yield element
def create_accelerator( even_batches : bool = True ) -> Accelerator:
    """simple docstring"""
    accelerator = Accelerator(even_batches=even_batches )
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def create_dataloader( accelerator : Accelerator, dataset_size : int, batch_size : int, iterable : bool = False ) -> DataLoader:
    """simple docstring"""
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size ) ) )
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size ) ) )
    dl = DataLoader(dataset, batch_size=batch_size )
    dl = accelerator.prepare(dl )
    return dl
def verify_dataloader_batch_sizes( accelerator : Accelerator, dataset_size : int, batch_size : int, process_0_expected_batch_sizes : List[int], process_1_expected_batch_sizes : List[int], ) -> Any:
    """simple docstring"""
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size )
    batch_sizes = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes( ) -> int:
    """simple docstring"""
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1, 1], )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 2], )
def test_can_disable_even_batches( ) -> Tuple:
    """simple docstring"""
    accelerator = create_accelerator(even_batches=False )
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1], )
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 1], )
def test_can_join_uneven_inputs( ) -> List[Any]:
    """simple docstring"""
    accelerator = create_accelerator(even_batches=False )
    ddp_model = torch.nn.Linear(1, 1 )
    ddp_model = accelerator.prepare(ddp_model )
    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1 )
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model] ):
        for batch_idx, batch in enumerate(dl ):
            output = ddp_model(batch[0].float() )
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed( accelerator : Accelerator ) -> Any:
    """simple docstring"""
    with warnings.catch_warnings(record=True ) as w:
        with accelerator.join_uneven_inputs([Mock()] ):
            pass
        assert issubclass(w[-1].category, UserWarning )
        assert "only supported for multi-GPU" in str(w[-1].message )
def test_join_can_override_even_batches( ) -> int:
    """simple docstring"""
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches )
    ddp_model = torch.nn.Linear(1, 1 )
    ddp_model = accelerator.prepare(ddp_model )
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1 )
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1 )
    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches ):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders( ) -> Union[str, Any]:
    """simple docstring"""
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches )
    ddp_model = torch.nn.Linear(1, 1 )
    ddp_model = accelerator.prepare(ddp_model )
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True )
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1 )
    with warnings.catch_warnings():
        warnings.filterwarnings("""ignore""" )
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches ):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches( ) -> int:
    """simple docstring"""
    accelerator = create_accelerator()
    ddp_model = torch.nn.Linear(1, 1 )
    ddp_model = accelerator.prepare(ddp_model )
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True )
    with warnings.catch_warnings(record=True ) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False ):
            pass
        assert issubclass(w[-1].category, UserWarning )
        assert "only supported for map-style datasets" in str(w[-1].message )
def main( ) -> str:
    """simple docstring"""
    accelerator =create_accelerator()
    accelerator.print("""Test that even_batches variable ensures uniform batches across processes""" )
    test_default_ensures_even_batch_sizes()
    accelerator.print("""Run tests with even_batches disabled""" )
    test_can_disable_even_batches()
    accelerator.print("""Test joining uneven inputs""" )
    test_can_join_uneven_inputs()
    accelerator.print("""Test overriding even_batches when joining uneven inputs""" )
    test_join_can_override_even_batches()
    accelerator.print("""Test overriding even_batches for mixed dataloader types""" )
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("""Test overriding even_batches raises a warning for iterable dataloaders""" )
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("""Test join with non DDP distributed raises warning""" )
    original_state =accelerator.state.distributed_type
    accelerator.state.distributed_type =DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator )
    accelerator.state.distributed_type =original_state
if __name__ == "__main__":
main()
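# Hedged usage note (added for illustration): the uneven-batch assertions
# above only exercise both per-process branches when this script runs under
# more than one process, e.g.
#
#     accelerate launch --num_processes 2 <this_script>.py
#
# (the file name is an assumption; use whatever this script is saved as).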
| 687
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer( model : Dict ) -> List[str]:
    """simple docstring"""
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer ( nn.Module ):
    '''Wraps a linear module with a LoRA-style low-rank adapter - used for testing purposes only'''
    def __init__( self , module : nn.Module , rank : int ) -> None:
        '''simple docstring'''
        super().__init__()
        self.module =module
        self.adapter =nn.Sequential(
            nn.Linear(module.in_features , rank , bias=False ) , nn.Linear(rank , module.out_features , bias=False ) , )
        small_std =(2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
        nn.init.normal_(self.adapter[0].weight , std=small_std )
        nn.init.zeros_(self.adapter[1].weight )
        self.adapter.to(module.weight.device )
    def forward( self , input , *args , **kwargs ):
        '''simple docstring'''
        return self.module(input , *args , **kwargs ) + self.adapter(input )
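# Hedged usage sketch (added for illustration; `base` and the shapes below are
# made up): only the two small adapter matrices stay trainable, while the
# wrapped linear layer itself can remain frozen.
#
#     base = nn.Linear(768, 768)
#     base.weight.requires_grad_(False)
#     wrapped = LoRALayer(base, rank=16)
#     out = wrapped(torch.randn(1, 768))  # frozen path + low-rank update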
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    model_name = 'bigscience/bloom-1b7'
    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
    input_text = 'Hello my name is'
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
    EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
    EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
    MAX_NEW_TOKENS = 10
    def setUp( self : Optional[int] ) -> Tuple:
        '''simple docstring'''
        # Models and tokenizer
        self.tokenizer =AutoTokenizer.from_pretrained(self.model_name )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# Models and tokenizer
A__ : Optional[int] =AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
A__ : str =self.model_abit.config
self.assertTrue(hasattr(lowerCAmelCase_ , """quantization_config""" ) )
A__ : Union[str, Any] =config.to_dict()
A__ : Any =config.to_diff_dict()
A__ : Optional[Any] =config.to_json_string()
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
A__ : int =self.model_fpaa.get_memory_footprint()
A__ : Optional[Any] =self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
A__ : Tuple =get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCAmelCase_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
A__ : int =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Union[str, Any] =self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
A__ : Tuple =True
A__ : Optional[int] =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , device_map="""auto""" )
A__ : Union[str, Any] =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Optional[Any] =model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Any:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
with self.assertRaises(lowerCAmelCase_ ):
A__ : Dict =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , load_in_abit=lowerCAmelCase_ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(lowerCAmelCase_ ):
            # Tries with a `dtype`
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Optional[Any] =self.model_fpaa.to(torch.floataa )
A__ : Dict =self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
A__ : List[str] =self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
A__ : List[str] =self.model_fpaa.half()
# Check this does not throw an error
A__ : int =self.model_fpaa.float()
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
A__ : Dict =AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def lowercase__ ( cls : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Tuple ="""t5-small"""
A__ : Optional[Any] ="""google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
A__ : Optional[int] =AutoTokenizer.from_pretrained(cls.model_name )
A__ : Optional[int] ="""Translate in German: Hello, my dog is cute"""
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
from transformers import TaForConditionalGeneration
A__ : Optional[int] =TaForConditionalGeneration._keep_in_fpaa_modules
A__ : Optional[Any] =None
# test with `t5-small`
A__ : str =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : List[str] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Optional[Any] =model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
A__ : List[str] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : Tuple =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Union[str, Any] =model.generate(**lowerCAmelCase_ )
A__ : Dict =modules
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
A__ : Optional[int] =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Any =model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
A__ : Union[str, Any] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : Optional[int] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Dict =model.generate(**lowerCAmelCase_ )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
super().setUp()
# model_name
A__ : Any ="""bigscience/bloom-560m"""
A__ : List[Any] ="""t5-small"""
# Different types of model
A__ : Dict =AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Sequence classification model
A__ : List[Any] =AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# CausalLM model
A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Seq2seq model
A__ : List[str] =AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
A__ : Dict =pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
A__ : Optional[int] =self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
A__ : int =AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
A__ : str =self.tokenizer(self.input_text , return_tensors="""pt""" )
# Second real batch
A__ : Any =model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
    def setUp( self : int ) -> Optional[Any]:
        '''simple docstring'''
        self.model_name ="""facebook/opt-350m"""
        super().setUp()
    def test_training( self : List[str] ) -> Dict:
'''simple docstring'''
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
        model =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_4bit=True )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
        for param in model.parameters():
            param.requires_grad =False # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data =param.data.to(torch.float32 )
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module ) ):
                module.q_proj =LoRALayer(module.q_proj , rank=16 )
                module.k_proj =LoRALayer(module.k_proj , rank=16 )
                module.v_proj =LoRALayer(module.v_proj , rank=16 )
        # Step 3: dummy batch
        batch =self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out =model.forward(**batch )
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(module , LoRALayer ):
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(module , nn.Embedding ):
                self.assertTrue(module.weight.grad is None )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
    model_name = 'gpt2-xl'
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 687
| 1
|
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class snake_case ( unittest.TestCase ):
@require_torch
def lowercase_ ( self : Union[str, Any])-> Dict:
'''simple docstring'''
__lowerCAmelCase: Any = pipeline(
task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused")
__lowerCAmelCase: List[Any] = load_dataset("ashraq/esc50")
__lowerCAmelCase: str = dataset["train"]["audio"][-1]["array"]
__lowerCAmelCase: List[str] = audio_classifier(UpperCamelCase__ , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
self.assertEqual(
nested_simplify(UpperCamelCase__) , [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}] , )
@unittest.skip("No models are available in TF")
def lowercase_ ( self : Dict)-> Any:
'''simple docstring'''
pass
@slow
@require_torch
def lowercase_ ( self : Union[str, Any])-> int:
'''simple docstring'''
__lowerCAmelCase: Dict = pipeline(
task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
# This is an audio of a dog
__lowerCAmelCase: Dict = load_dataset("ashraq/esc50")
__lowerCAmelCase: Dict = dataset["train"]["audio"][-1]["array"]
__lowerCAmelCase: Any = audio_classifier(UpperCamelCase__ , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
self.assertEqual(
nested_simplify(UpperCamelCase__) , [
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
] , )
__lowerCAmelCase: Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
self.assertEqual(
nested_simplify(UpperCamelCase__) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
__lowerCAmelCase: Dict = audio_classifier(
[audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5)
self.assertEqual(
nested_simplify(UpperCamelCase__) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
@unittest.skip("No models are available in TF")
def lowercase_ ( self : List[str])-> List[Any]:
'''simple docstring'''
pass
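# Hedged note (added for illustration): zero-shot audio classification with
# CLAP embeds the waveform with the audio tower and each candidate label with
# the text tower, then softmaxes the similarity scores - which is why the
# label strings themselves ("Sound of a dog", ...) act as the classes and the
# expected scores above depend on the exact label wording.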
| 346
|
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__A = True
except ImportError:
__A = False
__A = logging.get_logger(__name__) # pylint: disable=invalid-name
def add_new_model_command_factory( args: Namespace ) -> int:
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class AddNewModelCommand ( BaseTransformersCLICommand ):
@staticmethod
def lowercase_ ( UpperCamelCase__ : ArgumentParser)-> List[str]:
'''simple docstring'''
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing" , action="store_true" , help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file" , type=str , help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path" , type=str , help="Path to cookiecutter. Should only be used for testing purposes.")
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)
    def __init__( self : Dict , testing : bool , testing_file : str , path : Optional[Any]=None , *args : Tuple)-> str:
        '''simple docstring'''
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
def lowercase_ ( self : int)-> Optional[int]:
'''simple docstring'''
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead.")
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n")
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:2_2]]
        if len(directories) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory.")
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file , "r") as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path) , no_input=True , extra_context=testing_configuration , )
        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:2_2]][0]
# Retrieve configuration
with open(directory + "/configuration.json" , "r") as configuration_file:
            configuration = json.load(configuration_file)
        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(f"{directory}/configuration.json")
__lowerCAmelCase: Optional[Any] = "PyTorch" in generate_tensorflow_pytorch_and_flax
__lowerCAmelCase: str = "TensorFlow" in generate_tensorflow_pytorch_and_flax
__lowerCAmelCase: str = "Flax" in generate_tensorflow_pytorch_and_flax
__lowerCAmelCase: Any = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__)
os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}" , exist_ok=UpperCamelCase__)
# Tests require submodules as they have parent imports
with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py" , "w"):
pass
shutil.move(
f"{directory}/__init__.py" , f"{model_dir}/__init__.py" , )
shutil.move(
f"{directory}/configuration_{lowercase_model_name}.py" , f"{model_dir}/configuration_{lowercase_model_name}.py" , )
        def remove_copy_lines(path: str):
            with open(path , "r") as f:
                lines = f.readlines()
            with open(path , "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")
shutil.move(
f"{directory}/modeling_{lowercase_model_name}.py" , f"{model_dir}/modeling_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")
shutil.move(
f"{directory}/modeling_tf_{lowercase_model_name}.py" , f"{model_dir}/modeling_tf_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_tf_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")
if output_flax:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")
shutil.move(
f"{directory}/modeling_flax_{lowercase_model_name}.py" , f"{model_dir}/modeling_flax_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_flax_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")
shutil.move(
f"{directory}/{lowercase_model_name}.md" , f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md" , )
shutil.move(
f"{directory}/tokenization_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/tokenization_fast_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}_fast.py" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str , line_to_copy_below: str , lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh , "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)
            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")
            # Copy the file permissions from the old file to the new file
            copymode(original_file , abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path , original_file)
        def skip_units(line):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split("\"")[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split("\"")[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in , line_to_copy_below , lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)
            remove(path_to_datafile)
replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
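# Hedged usage note (added for illustration): the command registered above is
# reached through the CLI entry point, e.g.
#
#     transformers-cli add-new-model
#
# though, as the warning in `run` states, it is deprecated in favour of
# `transformers-cli add-new-model-like`.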
| 346
| 1
|
import numpy as np
def sigmoid( vector ):
    return 1 / (1 + np.exp(-vector ))
def sigmoid_linear_unit( vector ):
    return vector * sigmoid(vector )
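def _demo() -> None:
    # Hedged usage sketch (added for illustration): sigmoid squashes inputs
    # into (0, 1); the sigmoid linear unit (SiLU/swish) keeps large positive
    # inputs almost unchanged and pushes large negative inputs toward zero.
    x = np.array([-10.0, 0.0, 10.0])
    print(sigmoid(x))              # approx [4.5e-05, 0.5, 0.99995]
    print(sigmoid_linear_unit(x))  # approx [-4.5e-04, 0.0, 9.99955]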
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_sew"""] = [
"""SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SEWForCTC""",
"""SEWForSequenceClassification""",
"""SEWModel""",
"""SEWPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
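# Hedged usage note (added for illustration): swapping this module out for a
# _LazyModule in sys.modules means a plain
#
#     from transformers import SEWConfig
#
# stays cheap - the torch-dependent modeling file listed in _import_structure
# is only imported when one of its symbols is first accessed.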
| 486
| 0
|
from random import randint, random
def construct_highway( number_of_cells , frequency , initial_speed , random_frequency = False , random_speed = False , max_speed = 5 , ) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed , 0 )
    while i < number_of_cells:
        highway[0][i] = (
            randint(0 , max_speed ) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1 , max_speed * 2 ) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def get_distance( highway_now , car_index ) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells ) ):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now , -1 )
def update( highway_now , probability , max_speed ) -> list:
    number_of_cells = len(highway_now )
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells ):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1 , max_speed )
            # Number of empty cells before the next car
            dn = get_distance(highway_now , car_index ) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index] , dn )
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1 , 0 )
    return next_highway
def simulate( highway , number_of_update , probability , max_speed ) -> list:
    number_of_cells = len(highway[0] )
    for i in range(number_of_update ):
        next_speeds_calculated = update(highway[i] , probability , max_speed )
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells ):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds )
    return highway
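def _demo() -> None:
    # Hedged usage sketch (added for illustration; the parameter values are
    # made up): build a 100-cell highway with a car every 5 cells at speed 2,
    # then run 10 Nagel-Schreckenberg update steps with a 10% chance per step
    # that a driver slows down.
    highway = construct_highway(100, frequency=5, initial_speed=2)
    result = simulate(highway, number_of_update=10, probability=0.1, max_speed=5)
    print(len(result))  # 11 rows: the initial state plus 10 updates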
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig (PretrainedConfig ):
    model_type = """unispeech"""
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , replace_prob=0.5 , **kwargs , ) -> Tuple:
        '''simple docstring'''
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob
    @property
    def inputs_to_logits_ratio( self ) -> int:
        '''simple docstring'''
        return functools.reduce(operator.mul , self.conv_stride , 1 )
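# Hedged usage sketch (added for illustration): with the default conv_stride
# of (5, 2, 2, 2, 2, 2, 2) the feature encoder downsamples the raw waveform
# by 5 * 2**6 = 320 samples per output frame, which is exactly what the
# property above computes.
#
#     config = UniSpeechConfig()
#     assert config.inputs_to_logits_ratio == 320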
| 326
| 1
|
def __UpperCamelCase ( txt : str ) ->list:
    """simple docstring"""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt ) )
        if txt[a].isalpha()
    ]
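def _demo() -> None:
    # Hedged usage sketch (added for illustration): one variant per alphabetic
    # position; non-alphabetic characters are skipped.
    print(__UpperCamelCase('ab1c'))  # ['Ab1c', 'aB1c', 'ab1C']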
if __name__ == "__main__":
__import__('doctest').testmod()
| 702
|
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph :
def __init__( self )-> List[str]:
lowerCamelCase_ ={}
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1 )-> List[Any]:
if self.graph.get(_SCREAMING_SNAKE_CASE ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
lowerCamelCase_ =[[w, v]]
if not self.graph.get(_SCREAMING_SNAKE_CASE ):
lowerCamelCase_ =[]
def _snake_case ( self )-> str:
return list(self.graph )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Dict:
if self.graph.get(_SCREAMING_SNAKE_CASE ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )-> Optional[Any]:
if s == d:
return []
lowerCamelCase_ =[]
lowerCamelCase_ =[]
if s == -2:
lowerCamelCase_ =list(self.graph )[0]
stack.append(_SCREAMING_SNAKE_CASE )
visited.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_SCREAMING_SNAKE_CASE )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_SCREAMING_SNAKE_CASE ) != 0:
lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
else:
lowerCamelCase_ =ss
            # check if we have reached the starting point
if len(_SCREAMING_SNAKE_CASE ) == 0:
return visited
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-1 )-> Optional[int]:
if c == -1:
lowerCamelCase_ =floor(random() * 1_0000 ) + 10
for i in range(_SCREAMING_SNAKE_CASE ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowerCamelCase_ =floor(random() * c ) + 1
if n != i:
self.add_pair(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1 )
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> Any:
lowerCamelCase_ =deque()
lowerCamelCase_ =[]
if s == -2:
lowerCamelCase_ =list(self.graph )[0]
d.append(_SCREAMING_SNAKE_CASE )
visited.append(_SCREAMING_SNAKE_CASE )
while d:
lowerCamelCase_ =d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> List[Any]:
lowerCamelCase_ =0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> List[str]:
return len(self.graph[u] )
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> Union[str, Any]:
lowerCamelCase_ =[]
lowerCamelCase_ =[]
if s == -2:
lowerCamelCase_ =list(self.graph )[0]
stack.append(_SCREAMING_SNAKE_CASE )
visited.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =s
lowerCamelCase_ =[]
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(_SCREAMING_SNAKE_CASE ) != 0:
lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
else:
lowerCamelCase_ =ss
# check if se have reached the starting point
if len(_SCREAMING_SNAKE_CASE ) == 0:
return sorted_nodes
def _snake_case ( self )-> str:
lowerCamelCase_ =[]
lowerCamelCase_ =[]
lowerCamelCase_ =list(self.graph )[0]
stack.append(_SCREAMING_SNAKE_CASE )
visited.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =-2
lowerCamelCase_ =[]
lowerCamelCase_ =s
lowerCamelCase_ =False
lowerCamelCase_ =set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase_ =True
if len(_SCREAMING_SNAKE_CASE ) != 0:
lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
else:
lowerCamelCase_ =False
indirect_parents.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =s
lowerCamelCase_ =ss
            # check if we have reached the starting point
if len(_SCREAMING_SNAKE_CASE ) == 0:
return list(_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Tuple:
lowerCamelCase_ =[]
lowerCamelCase_ =[]
lowerCamelCase_ =list(self.graph )[0]
stack.append(_SCREAMING_SNAKE_CASE )
visited.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =-2
lowerCamelCase_ =[]
lowerCamelCase_ =s
lowerCamelCase_ =False
lowerCamelCase_ =set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase_ =True
if len(_SCREAMING_SNAKE_CASE ) != 0:
lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
else:
lowerCamelCase_ =False
indirect_parents.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =s
lowerCamelCase_ =ss
            # check if we have reached the starting point
if len(_SCREAMING_SNAKE_CASE ) == 0:
return False
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )-> List[str]:
lowerCamelCase_ =time()
self.dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =time()
return end - begin
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> List[str]:
lowerCamelCase_ =time()
self.bfs(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =time()
return end - begin
class Graph :
def __init__( self )-> Optional[Any]:
lowerCamelCase_ ={}
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1 )-> List[str]:
# check if the u exists
if self.graph.get(_SCREAMING_SNAKE_CASE ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
lowerCamelCase_ =[[w, v]]
# add the other way
if self.graph.get(_SCREAMING_SNAKE_CASE ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
lowerCamelCase_ =[[w, u]]
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Tuple:
if self.graph.get(_SCREAMING_SNAKE_CASE ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_SCREAMING_SNAKE_CASE )
# the other way round
if self.graph.get(_SCREAMING_SNAKE_CASE ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )-> int:
if s == d:
return []
lowerCamelCase_ =[]
lowerCamelCase_ =[]
if s == -2:
lowerCamelCase_ =list(self.graph )[0]
stack.append(_SCREAMING_SNAKE_CASE )
visited.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_SCREAMING_SNAKE_CASE )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_SCREAMING_SNAKE_CASE ) != 0:
lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
else:
lowerCamelCase_ =ss
            # check if we have reached the starting point
if len(_SCREAMING_SNAKE_CASE ) == 0:
return visited
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-1 )-> Optional[int]:
if c == -1:
lowerCamelCase_ =floor(random() * 1_0000 ) + 10
for i in range(_SCREAMING_SNAKE_CASE ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowerCamelCase_ =floor(random() * c ) + 1
if n != i:
self.add_pair(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1 )
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> List[str]:
lowerCamelCase_ =deque()
lowerCamelCase_ =[]
if s == -2:
lowerCamelCase_ =list(self.graph )[0]
d.append(_SCREAMING_SNAKE_CASE )
visited.append(_SCREAMING_SNAKE_CASE )
while d:
lowerCamelCase_ =d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Union[str, Any]:
return len(self.graph[u] )
def _snake_case ( self )-> Any:
lowerCamelCase_ =[]
lowerCamelCase_ =[]
lowerCamelCase_ =list(self.graph )[0]
stack.append(_SCREAMING_SNAKE_CASE )
visited.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =-2
lowerCamelCase_ =[]
lowerCamelCase_ =s
lowerCamelCase_ =False
lowerCamelCase_ =set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase_ =True
if len(_SCREAMING_SNAKE_CASE ) != 0:
lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
else:
lowerCamelCase_ =False
indirect_parents.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =s
lowerCamelCase_ =ss
            # check if we have reached the starting point
if len(_SCREAMING_SNAKE_CASE ) == 0:
return list(_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Any:
lowerCamelCase_ =[]
lowerCamelCase_ =[]
lowerCamelCase_ =list(self.graph )[0]
stack.append(_SCREAMING_SNAKE_CASE )
visited.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =-2
lowerCamelCase_ =[]
lowerCamelCase_ =s
lowerCamelCase_ =False
lowerCamelCase_ =set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase_ =True
if len(_SCREAMING_SNAKE_CASE ) != 0:
lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
else:
lowerCamelCase_ =False
indirect_parents.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =s
lowerCamelCase_ =ss
            # check if we have reached the starting point
if len(_SCREAMING_SNAKE_CASE ) == 0:
return False
def _snake_case ( self )-> Optional[Any]:
return list(self.graph )
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )-> str:
lowerCamelCase_ =time()
self.dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =time()
return end - begin
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> Dict:
lowerCamelCase_ =time()
self.bfs(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =time()
return end - begin
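# Hedged note (added for illustration): both classes above keep the graph as
# a plain dict of adjacency lists, {node: [[weight, neighbour], ...]}, so an
# undirected edge u-v with weight w is stored twice:
#
#     graph = {0: [[1, 1]], 1: [[1, 0]]}  # one undirected edge 0-1, weight 1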
| 75
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self : Optional[Any], parent : Tuple, batch_size : int=7, num_channels : Optional[Any]=3, image_size : Optional[int]=18, min_resolution : List[str]=30, max_resolution : List[str]=4_00, do_resize : Optional[Any]=True, size : int=None, apply_ocr : List[Any]=True, ) -> Optional[int]:
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict( self : Union[str, Any] ) -> Any:
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp( self : List[Any] ) -> int:
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
@property
    def image_processor_dict( self : Optional[int] ) -> str:
        return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : str ) -> List[str]:
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__, 'do_resize' ) )
self.assertTrue(hasattr(UpperCamelCase__, 'size' ) )
self.assertTrue(hasattr(UpperCamelCase__, 'apply_ocr' ) )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
_A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'height': 18, 'width': 18} )
_A = self.image_processing_class.from_dict(self.image_processor_dict, size=42 )
self.assertEqual(image_processor.size, {'height': 42, 'width': 42} )
def __UpperCAmelCase ( self : Any ) -> Tuple:
pass
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, Image.Image )
# Test not batched input
_A = image_processing(image_inputs[0], return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
self.assertIsInstance(encoding.words, UpperCamelCase__ )
self.assertIsInstance(encoding.boxes, UpperCamelCase__ )
# Test batched
_A = image_processing(UpperCamelCase__, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__, numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, np.ndarray )
# Test not batched input
_A = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
# Test batched
_A = image_processing(UpperCamelCase__, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__, torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, torch.Tensor )
# Test not batched input
_A = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
# Test batched
_A = image_processing(UpperCamelCase__, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
def __UpperCAmelCase ( self : Any ) -> List[Any]:
# with apply_OCR = True
_A = LayoutLMvaImageProcessor()
from datasets import load_dataset
_A = load_dataset('hf-internal-testing/fixtures_docvqa', split='test' )
_A = Image.open(ds[0]['file'] ).convert('RGB' )
_A = image_processing(UpperCamelCase__, return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ), len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_A = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
_A = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 
6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words, UpperCamelCase__ )
self.assertListEqual(encoding.boxes, UpperCamelCase__ )
# with apply_OCR = False
_A = LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__ )
_A = image_processing(UpperCamelCase__, return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) )
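# A minimal usage sketch of the image processor exercised above, assuming the
# public `LayoutLMv3ImageProcessor` API and an illustrative local file
# "document.png". With apply_ocr=True (the default) the processor runs
# Tesseract and also returns recognized words plus normalized bounding boxes.
from transformers import LayoutLMv3ImageProcessor
from PIL import Image

def _ocr_demo(path="document.png"):
    image = Image.open(path).convert("RGB")
    processor = LayoutLMv3ImageProcessor(apply_ocr=True)  # requires pytesseract
    encoding = processor(image, return_tensors="pt")
    print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224])
    print(encoding.words, encoding.boxes)  # keys absent when apply_ocr=False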
| 107
|
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class a_ ( lowerCamelCase ):
lowercase = """detr"""
lowercase = ["""past_key_values"""]
lowercase = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=100 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="relu" , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="sine" , _SCREAMING_SNAKE_CASE="resnet50" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.1 , **_SCREAMING_SNAKE_CASE , ) -> List[Any]:
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
UpperCamelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase = backbone_config.get("""model_type""" )
UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase = config_class.from_dict(_SCREAMING_SNAKE_CASE )
# set timm attributes to None
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = None, None, None
UpperCamelCase = use_timm_backbone
UpperCamelCase = backbone_config
UpperCamelCase = num_channels
UpperCamelCase = num_queries
UpperCamelCase = d_model
UpperCamelCase = encoder_ffn_dim
UpperCamelCase = encoder_layers
UpperCamelCase = encoder_attention_heads
UpperCamelCase = decoder_ffn_dim
UpperCamelCase = decoder_layers
UpperCamelCase = decoder_attention_heads
UpperCamelCase = dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = activation_function
UpperCamelCase = init_std
UpperCamelCase = init_xavier_std
UpperCamelCase = encoder_layerdrop
UpperCamelCase = decoder_layerdrop
UpperCamelCase = encoder_layers
UpperCamelCase = auxiliary_loss
UpperCamelCase = position_embedding_type
UpperCamelCase = backbone
UpperCamelCase = use_pretrained_backbone
UpperCamelCase = dilation
# Hungarian matcher
UpperCamelCase = class_cost
UpperCamelCase = bbox_cost
UpperCamelCase = giou_cost
# Loss coefficients
UpperCamelCase = mask_loss_coefficient
UpperCamelCase = dice_loss_coefficient
UpperCamelCase = bbox_loss_coefficient
UpperCamelCase = giou_loss_coefficient
UpperCamelCase = eos_coefficient
super().__init__(is_encoder_decoder=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def A__ ( self ) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def A__ ( self ) -> int:
"""simple docstring"""
return self.d_model
@classmethod
def A__ ( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
return cls(backbone_config=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Dict[str, any]:
"""simple docstring"""
UpperCamelCase = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
UpperCamelCase = self.backbone_config.to_dict()
UpperCamelCase = self.__class__.model_type
return output
class a_ ( lowerCamelCase ):
lowercase = version.parse("""1.11""" )
@property
def A__ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def A__ ( self ) -> float:
"""simple docstring"""
return 1e-5
@property
def A__ ( self ) -> int:
"""simple docstring"""
return 12
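# A brief sketch of how the configuration above is used, assuming the public
# `DetrConfig` it mirrors: the decoder-style names are the canonical attributes
# and the attribute map exposes the generic aliases.
from transformers import DetrConfig

config = DetrConfig(d_model=256, encoder_attention_heads=8)
assert config.hidden_size == 256        # alias routed to d_model
assert config.num_attention_heads == 8  # alias routed to encoder_attention_heads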
| 301
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , a__ , a__=13 , a__=30 , a__=2 , a__=3 , a__=True , a__=True , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=10 , a__=0.02 , a__=None , ):
__SCREAMING_SNAKE_CASE : Optional[int] = parent
__SCREAMING_SNAKE_CASE : int = batch_size
__SCREAMING_SNAKE_CASE : str = image_size
__SCREAMING_SNAKE_CASE : Dict = patch_size
__SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
__SCREAMING_SNAKE_CASE : Optional[int] = is_training
__SCREAMING_SNAKE_CASE : Dict = use_labels
__SCREAMING_SNAKE_CASE : Tuple = hidden_size
__SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
__SCREAMING_SNAKE_CASE : Dict = num_attention_heads
__SCREAMING_SNAKE_CASE : List[str] = intermediate_size
__SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
__SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[int] = type_sequence_label_size
__SCREAMING_SNAKE_CASE : str = initializer_range
__SCREAMING_SNAKE_CASE : List[str] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE : List[str] = (image_size // patch_size) ** 2
__SCREAMING_SNAKE_CASE : List[str] = num_patches + 1
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE : Dict = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : int = self.get_config()
return config, pixel_values, labels
def a_ ( self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def a_ ( self , a__ , a__ , a__ ):
__SCREAMING_SNAKE_CASE : List[str] = ViTMSNModel(config=a__ )
model.to(a__ )
model.eval()
__SCREAMING_SNAKE_CASE : str = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a_ ( self , a__ , a__ , a__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = self.type_sequence_label_size
__SCREAMING_SNAKE_CASE : Optional[int] = ViTMSNForImageClassification(a__ )
model.to(a__ )
model.eval()
__SCREAMING_SNAKE_CASE : Optional[int] = model(a__ , labels=a__ )
print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" )
print("Labels: {labels}" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__SCREAMING_SNAKE_CASE : int = 1
__SCREAMING_SNAKE_CASE : Optional[Any] = ViTMSNForImageClassification(a__ )
model.to(a__ )
model.eval()
__SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE : Optional[int] = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = config_and_inputs
__SCREAMING_SNAKE_CASE : List[str] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case__ : List[Any] = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
snake_case__ : Optional[int] = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Optional[int] = False
snake_case__ : str = False
snake_case__ : Tuple = False
snake_case__ : Optional[int] = False
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Optional[int] = ViTMSNModelTester(self )
__SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def a_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMSN does not use inputs_embeds" )
def a_ ( self ):
pass
def a_ ( self ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Tuple = model_class(a__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__SCREAMING_SNAKE_CASE : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , nn.Linear ) )
def a_ ( self ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : str = model_class(a__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE : Optional[int] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a__ )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def a_ ( self ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : List[str] = ViTMSNModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def __A ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a_ ( self ):
return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None
@slow
def a_ ( self ):
torch.manual_seed(2 )
__SCREAMING_SNAKE_CASE : Optional[Any] = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(a__ )
__SCREAMING_SNAKE_CASE : Any = self.default_image_processor
__SCREAMING_SNAKE_CASE : Dict = prepare_img()
__SCREAMING_SNAKE_CASE : Any = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(**a__ )
# verify the logits
__SCREAMING_SNAKE_CASE : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
__SCREAMING_SNAKE_CASE : Any = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1e-4 ) )
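# A hedged end-to-end sketch of the integration test above; the checkpoint name
# comes from the test, and network access is assumed for the download.
import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNForImageClassification

def classify(path):
    model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")
    processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
    inputs = processor(images=Image.open(path), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 1000)
    return logits.argmax(-1).item()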
| 564
|
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {'''vocab_file''': '''spiece.model'''}
lowercase = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
lowercase = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
lowercase = '''▁'''
class __lowerCamelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case__ : str = VOCAB_FILES_NAMES
snake_case__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : Any = ['''input_ids''', '''attention_mask''']
def __init__( self , a__ , a__="</s>" , a__="<unk>" , a__="<pad>" , a__=100 , a__=None , a__ = None , a__=True , **a__ , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
__SCREAMING_SNAKE_CASE : Optional[Any] = [f'<extra_id_{i}>' for i in range(a__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__SCREAMING_SNAKE_CASE : Union[str, Any] = len(set(filter(lambda a__ : bool("extra_id" in str(a__ ) ) , a__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
if legacy:
logger.warning_once(
f'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
__SCREAMING_SNAKE_CASE : int = legacy
__SCREAMING_SNAKE_CASE : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=a__ , unk_token=a__ , pad_token=a__ , extra_ids=a__ , additional_special_tokens=a__ , sp_model_kwargs=self.sp_model_kwargs , legacy=a__ , **a__ , )
__SCREAMING_SNAKE_CASE : Dict = vocab_file
__SCREAMING_SNAKE_CASE : Union[str, Any] = extra_ids
__SCREAMING_SNAKE_CASE : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a__ )
@staticmethod
def a_ ( a__ , a__ , a__ ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
__SCREAMING_SNAKE_CASE : Optional[int] = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
f' {pretrained_model_name_or_path} automatically truncating your input to'
f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , a__ , )
return max_model_length
@property
def a_ ( self ):
return self.sp_model.get_piece_size() + self._extra_ids
def a_ ( self ):
__SCREAMING_SNAKE_CASE : str = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def a_ ( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
def a_ ( self ):
return list(
set(filter(lambda a__ : bool(re.search(R"<extra_id_\d+>" , a__ ) ) is not None , self.additional_special_tokens ) ) )
def a_ ( self ):
return [self._convert_token_to_id(a__ ) for token in self.get_sentinel_tokens()]
    def a_ ( self , token_ids ):
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
    def a_ ( self , token_ids_0 , token_ids_1 = None ):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
    def a_ ( self , token_ids_0 , token_ids_1 = None ):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0 )
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1 )
            return token_ids_0 + token_ids_1
def __getstate__( self ):
__SCREAMING_SNAKE_CASE : Any = self.__dict__.copy()
__SCREAMING_SNAKE_CASE : List[str] = None
return state
def __setstate__( self , a__ ):
__SCREAMING_SNAKE_CASE : List[str] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__SCREAMING_SNAKE_CASE : List[Any] = {}
__SCREAMING_SNAKE_CASE : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a_ ( self , a__ , **a__ ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
__SCREAMING_SNAKE_CASE : str = SPIECE_UNDERLINE + text.replace(a__ , " " )
return super().tokenize(a__ , **a__ )
def a_ ( self , a__ , **a__ ):
if not self.legacy:
__SCREAMING_SNAKE_CASE : Union[str, Any] = text.startswith(a__ )
if is_first:
__SCREAMING_SNAKE_CASE : str = text[1:]
__SCREAMING_SNAKE_CASE : List[str] = self.sp_model.encode(a__ , out_type=a__ )
if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(a__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def a_ ( self , a__ ):
if token.startswith("<extra_id_" ):
__SCREAMING_SNAKE_CASE : Any = re.match(R"<extra_id_(\d+)>" , a__ )
__SCREAMING_SNAKE_CASE : str = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(a__ )
def a_ ( self , a__ ):
if index < self.sp_model.get_piece_size():
__SCREAMING_SNAKE_CASE : Any = self.sp_model.IdToPiece(a__ )
else:
__SCREAMING_SNAKE_CASE : Tuple = f'<extra_id_{self.vocab_size - 1 - index}>'
return token
def a_ ( self , a__ ):
__SCREAMING_SNAKE_CASE : Tuple = []
__SCREAMING_SNAKE_CASE : Union[str, Any] = ""
__SCREAMING_SNAKE_CASE : int = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a__ ) + token
__SCREAMING_SNAKE_CASE : Union[str, Any] = True
__SCREAMING_SNAKE_CASE : Any = []
else:
current_sub_tokens.append(a__ )
__SCREAMING_SNAKE_CASE : Dict = False
out_string += self.sp_model.decode(a__ )
return out_string.strip()
def a_ ( self , a__ , a__ = None ):
if not os.path.isdir(a__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(
a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , "wb" ) as fi:
__SCREAMING_SNAKE_CASE : Dict = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,)
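# A hedged sketch of the sentinel-token behaviour implemented above, using the
# public T5Tokenizer (sentencepiece and a model download are assumed):
# <extra_id_0> maps to the last vocabulary id and </s> is appended automatically.
from transformers import T5Tokenizer

def _sentinel_demo():
    tok = T5Tokenizer.from_pretrained("t5-small")
    ids = tok("The <extra_id_0> walks in <extra_id_1> park").input_ids
    assert ids[-1] == tok.eos_token_id
    assert tok.convert_tokens_to_ids("<extra_id_0>") == tok.vocab_size - 1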
| 564
| 1
|
"""simple docstring"""
import numpy as np
def __lowerCAmelCase ( __UpperCamelCase : np.array ):
    '''
    Element-wise logistic sigmoid, 1 / (1 + e^-x).

    >>> __lowerCAmelCase(np.array([0.0]))
    array([0.5])
    '''
    return 1 / (1 + np.exp(-__UpperCamelCase ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : str = 'open-llama'
def __init__( self : str ,_UpperCAmelCase : int=100000 ,_UpperCAmelCase : List[str]=4096 ,_UpperCAmelCase : Dict=11008 ,_UpperCAmelCase : int=32 ,_UpperCAmelCase : Union[str, Any]=32 ,_UpperCAmelCase : List[str]="silu" ,_UpperCAmelCase : List[Any]=2048 ,_UpperCAmelCase : Any=0.02 ,_UpperCAmelCase : int=1E-6 ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Dict=0 ,_UpperCAmelCase : Optional[Any]=1 ,_UpperCAmelCase : Dict=2 ,_UpperCAmelCase : Tuple=False ,_UpperCAmelCase : Dict=True ,_UpperCAmelCase : Dict=0.1 ,_UpperCAmelCase : Optional[int]=0.1 ,_UpperCAmelCase : List[str]=True ,_UpperCAmelCase : int=True ,_UpperCAmelCase : Union[str, Any]=None ,**_UpperCAmelCase : Optional[int] ,):
_a : str = vocab_size
_a : str = max_position_embeddings
_a : List[str] = hidden_size
_a : Any = intermediate_size
_a : Union[str, Any] = num_hidden_layers
_a : Tuple = num_attention_heads
_a : int = hidden_act
_a : str = initializer_range
_a : Any = rms_norm_eps
_a : Dict = use_cache
_a : Optional[int] = kwargs.pop(
'use_memorry_efficient_attention' ,_UpperCAmelCase )
_a : int = hidden_dropout_prob
_a : int = attention_dropout_prob
_a : Union[str, Any] = use_stable_embedding
_a : str = shared_input_output_embedding
_a : Any = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_UpperCAmelCase ,bos_token_id=_UpperCAmelCase ,eos_token_id=_UpperCAmelCase ,tie_word_embeddings=_UpperCAmelCase ,**_UpperCAmelCase ,)
def __lowercase ( self : Optional[Any] ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,_UpperCAmelCase ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                F"""got {self.rope_scaling}""" )
_a : Optional[Any] = self.rope_scaling.get('type' ,_UpperCAmelCase )
_a : Optional[Any] = self.rope_scaling.get('factor' ,_UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(_UpperCAmelCase ,_UpperCAmelCase ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
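# A hedged example of `rope_scaling` values against the validation above: a dict
# with exactly a `type` in {"linear", "dynamic"} and a float `factor` > 1 passes;
# anything else raises a ValueError.
valid_rope_scaling = {"type": "linear", "factor": 2.0}   # accepted
invalid_rope_scaling = {"type": "cubic", "factor": 0.5}  # rejected on both fields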
| 358
| 0
|
import colorsys
from PIL import Image # type: ignore
def get_distance( x , y , max_step ):
    """simple docstring"""
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb( distance ):
    """simple docstring"""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb( distance ):
    """simple docstring"""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def get_image( image_width = 800 , image_height = 600 , figure_center_x = -0.6 , figure_center_y = 0 , figure_width = 3.2 , max_step = 50 , use_distance_color_coding = True , ):
    """simple docstring"""
    img = Image.new('RGB' , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
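    # A small sanity check on the escape-time helper above: the origin never
    # diverges (normalized distance 1.0), while a point far outside the set
    # escapes at step 0.
    assert get_distance(0, 0, 50) == 1.0
    assert get_distance(4, 0, 50) == 0.0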
| 719
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dataavec_audio"] = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
    _import_structure["modeling_dataavec_text"] = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
    _import_structure["modeling_dataavec_vision"] = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
        _import_structure["modeling_tf_dataavec_vision"] = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
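# A hedged illustration of the lazy-import pattern above: until an attribute is
# accessed, none of the heavy torch/TF modeling files are imported, which keeps
# the top-level package import fast. Timing is machine-dependent.
#
#     import time, transformers
#     t0 = time.perf_counter()
#     _ = transformers.Data2VecAudioConfig  # first access triggers the real import
#     print(f"resolved in {time.perf_counter() - t0:.3f}s")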
| 102
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
a_ : Union[str, Any] = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _snake_case ( A__ ):
_lowercase : Optional[Any] = '''decision_transformer'''
_lowercase : str = ['''past_key_values''']
_lowercase : Union[str, Any] = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , a=17 , a=4 , a=128 , a=4096 , a=True , a=1 , a=1024 , a=3 , a=1 , a=None , a="relu" , a=0.1 , a=0.1 , a=0.1 , a=1E-5 , a=0.02 , a=True , a=True , a=5_0256 , a=5_0256 , a=False , a=False , **a , ) -> List[str]:
SCREAMING_SNAKE_CASE = state_dim
SCREAMING_SNAKE_CASE = act_dim
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = max_ep_len
SCREAMING_SNAKE_CASE = action_tanh
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = n_positions
SCREAMING_SNAKE_CASE = n_layer
SCREAMING_SNAKE_CASE = n_head
SCREAMING_SNAKE_CASE = n_inner
SCREAMING_SNAKE_CASE = activation_function
SCREAMING_SNAKE_CASE = resid_pdrop
SCREAMING_SNAKE_CASE = embd_pdrop
SCREAMING_SNAKE_CASE = attn_pdrop
SCREAMING_SNAKE_CASE = layer_norm_epsilon
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = scale_attn_weights
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = scale_attn_by_inverse_layer_idx
SCREAMING_SNAKE_CASE = reorder_and_upcast_attn
SCREAMING_SNAKE_CASE = bos_token_id
SCREAMING_SNAKE_CASE = eos_token_id
super().__init__(bos_token_id=a , eos_token_id=a , **a)
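# A brief usage sketch, assuming the public `DecisionTransformerConfig` mirrored
# above: state/action dimensions describe the environment, and the GPT-2-style
# keys stay reachable through the attribute map.
from transformers import DecisionTransformerConfig

cfg = DecisionTransformerConfig(state_dim=17, act_dim=4, n_head=2)
assert cfg.num_attention_heads == 2  # alias routed to n_head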
| 73
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
a_ : Dict = logging.get_logger(__name__)
class _snake_case ( A__ ):
    def __init__( self , *args , **kwargs) -> None:
        warnings.warn(
            'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use GLPNImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs)
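# The warning above points at a drop-in replacement; a hedged migration sketch:
from transformers import GLPNImageProcessor

processor = GLPNImageProcessor()  # same behaviour, no deprecation warning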
| 73
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __a , __a=2 , __a=32 , __a=16 , __a=3 , __a=True , __a=True , __a=32 , __a=4 , __a=[0, 1, 2, 3] , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=0.0_2 , __a=3 , __a=[1, 3_84, 24, 24] , __a=True , __a=None , ):
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = backbone_out_indices
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = backbone_featmap_shape
__lowerCAmelCase = scope
__lowerCAmelCase = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__lowerCAmelCase = (image_size // patch_size) ** 2
__lowerCAmelCase = num_patches + 1
def snake_case ( self ):
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def snake_case ( self ):
__lowerCAmelCase = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 1_92, 3_84, 7_68],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__a , backbone_featmap_shape=self.backbone_featmap_shape , )
def snake_case ( self , __a , __a , __a ):
__lowerCAmelCase = DPTModel(config=__a )
model.to(__a )
model.eval()
__lowerCAmelCase = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , __a , __a , __a ):
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = DPTForDepthEstimation(__a )
model.to(__a )
model.eval()
__lowerCAmelCase = model(__a )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def snake_case ( self , __a , __a , __a ):
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = DPTForSemanticSegmentation(__a )
model.to(__a )
model.eval()
__lowerCAmelCase = model(__a , labels=__a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def snake_case ( self ):
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[str] =(DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__UpperCAmelCase : Optional[Any] =(
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : Union[str, Any] =False
__UpperCAmelCase : Any =False
__UpperCAmelCase : Optional[int] =False
def snake_case ( self ):
__lowerCAmelCase = DPTModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def snake_case ( self ):
pass
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__a )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__a )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a )
def snake_case ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
if model_class in get_values(__a ):
continue
__lowerCAmelCase = model_class(__a )
model.to(__a )
model.train()
__lowerCAmelCase = self._prepare_for_class(__a , __a , return_labels=__a )
__lowerCAmelCase = model(**__a ).loss
loss.backward()
def snake_case ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = False
__lowerCAmelCase = True
if model_class in get_values(__a ) or not model_class.supports_gradient_checkpointing:
continue
__lowerCAmelCase = model_class(__a )
model.to(__a )
model.gradient_checkpointing_enable()
model.train()
__lowerCAmelCase = self._prepare_for_class(__a , __a , return_labels=__a )
__lowerCAmelCase = model(**__a ).loss
loss.backward()
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = _config_zero_init(__a )
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(config=__a )
# Skip the check for the backbone
__lowerCAmelCase = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__lowerCAmelCase = [f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def snake_case ( self ):
pass
@slow
def snake_case ( self ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__lowerCAmelCase = DPTModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case ( self ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = "add"
with self.assertRaises(__a ):
__lowerCAmelCase = DPTForDepthEstimation(__a )
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
__lowerCAmelCase = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
__lowerCAmelCase = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(__a )
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**__a )
__lowerCAmelCase = outputs.predicted_depth
# verify the predicted depth
__lowerCAmelCase = torch.Size((1, 3_84, 3_84) )
self.assertEqual(predicted_depth.shape , __a )
__lowerCAmelCase = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__a )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 , __a , atol=1e-4 ) )
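# A hedged follow-up to the integration test above: the predicted depth map
# comes back at model resolution (384x384) and is usually resized to the input
# image size with bicubic interpolation before visualization.
import torch

def resize_depth(predicted_depth, size):
    # size is the (height, width) of the original image; illustrative helper
    return torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1), size=size, mode="bicubic", align_corners=False
    ).squeeze(1)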
| 282
|
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory :
    '''simple docstring'''
    def __init__( self ):
        self.process = psutil.Process()
        self.peak_monitoring = False
    def peak_monitor( self ):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss , self.cpu_memory_peak )
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break
    def start( self ):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor )
        self.thread.daemon = True
        self.thread.start()
    def stop( self ):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    '''simple docstring'''
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = torch.cuda.memory_allocated(i )
    torch.cuda.reset_peak_memory_stats()
    return measures
def end_measure(start_measures ):
    '''simple docstring'''
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = (torch.cuda.memory_allocated(i ) - start_measures[str(i )]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i ) - start_measures[str(i )]) / 2**20
    return measures
def log_measures(measures , description ):
    '''simple docstring'''
    print(f"{description}:" )
    print(f"- Time: {measures['time']:.2f}s" )
    for i in range(torch.cuda.device_count() ):
        print(f"- GPU {i} allocated: {measures[str(i )]:.2f}MiB" )
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB" )
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB" )
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB" )
| 282
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :torch.FloatTensor
class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , _UpperCAmelCase = 65536 , _UpperCAmelCase = None , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = 0 , _UpperCAmelCase = "fourier" , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = 0.0 , _UpperCAmelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _UpperCAmelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _UpperCAmelCase = "UNetMidBlock1D" , _UpperCAmelCase = None , _UpperCAmelCase = (32, 32, 64) , _UpperCAmelCase = None , _UpperCAmelCase = 8 , _UpperCAmelCase = 1 , _UpperCAmelCase = False , ):
super().__init__()
lowercase__: List[Any] = sample_size
# time
if time_embedding_type == "fourier":
lowercase__: List[Any] = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=_UpperCAmelCase , log=_UpperCAmelCase , flip_sin_to_cos=_UpperCAmelCase )
lowercase__: List[str] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
lowercase__: Optional[Any] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=_UpperCAmelCase , downscale_freq_shift=_UpperCAmelCase )
lowercase__: List[str] = block_out_channels[0]
if use_timestep_embedding:
lowercase__: Dict = block_out_channels[0] * 4
lowercase__: Union[str, Any] = TimestepEmbedding(
in_channels=_UpperCAmelCase , time_embed_dim=_UpperCAmelCase , act_fn=_UpperCAmelCase , out_dim=block_out_channels[0] , )
lowercase__: List[Any] = nn.ModuleList([] )
lowercase__: List[str] = None
lowercase__: int = nn.ModuleList([] )
lowercase__: str = None
# down
lowercase__: Union[str, Any] = in_channels
for i, down_block_type in enumerate(_UpperCAmelCase ):
lowercase__: List[Any] = output_channel
lowercase__: Optional[int] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
lowercase__: str = i == len(_UpperCAmelCase ) - 1
lowercase__: List[Any] = get_down_block(
_UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(_UpperCAmelCase )
# mid
lowercase__: Dict = get_mid_block(
_UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_UpperCAmelCase , add_downsample=_UpperCAmelCase , )
# up
lowercase__: Any = list(reversed(_UpperCAmelCase ) )
lowercase__: List[str] = reversed_block_out_channels[0]
if out_block_type is None:
lowercase__: List[str] = out_channels
else:
lowercase__: Optional[Any] = block_out_channels[0]
for i, up_block_type in enumerate(_UpperCAmelCase ):
lowercase__: List[str] = output_channel
lowercase__: List[str] = (
reversed_block_out_channels[i + 1] if i < len(_UpperCAmelCase ) - 1 else final_upsample_channels
)
lowercase__: List[str] = i == len(_UpperCAmelCase ) - 1
lowercase__: Union[str, Any] = get_up_block(
_UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(_UpperCAmelCase )
lowercase__: Optional[Any] = output_channel
# out
lowercase__: Optional[Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
lowercase__: str = get_out_block(
out_block_type=_UpperCAmelCase , num_groups_out=_UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=_UpperCAmelCase , act_fn=_UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , ):
lowercase__: Any = timestep
if not torch.is_tensor(_UpperCAmelCase ):
lowercase__: Any = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(_UpperCAmelCase ) and len(timesteps.shape ) == 0:
lowercase__: Any = timesteps[None].to(sample.device )
lowercase__: Dict = self.time_proj(_UpperCAmelCase )
if self.config.use_timestep_embedding:
lowercase__: Optional[int] = self.time_mlp(_UpperCAmelCase )
else:
lowercase__: Tuple = timestep_embed[..., None]
lowercase__: Optional[int] = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
lowercase__: Union[str, Any] = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
lowercase__: Optional[Any] = ()
for downsample_block in self.down_blocks:
lowercase__, lowercase__: Any = downsample_block(hidden_states=_UpperCAmelCase , temb=_UpperCAmelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
lowercase__: Union[str, Any] = self.mid_block(_UpperCAmelCase , _UpperCAmelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
lowercase__: str = down_block_res_samples[-1:]
lowercase__: int = down_block_res_samples[:-1]
lowercase__: List[Any] = upsample_block(_UpperCAmelCase , res_hidden_states_tuple=_UpperCAmelCase , temb=_UpperCAmelCase )
# 5. post-process
if self.out_block:
lowercase__: Dict = self.out_block(_UpperCAmelCase , _UpperCAmelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=_UpperCAmelCase )
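# A hedged forward-pass sketch for the 1D UNet above, assuming the public
# diffusers `UNet1DModel` with dance-diffusion-style settings (the Fourier time
# embedding is concatenated along channels, hence extra_in_channels=16).
import torch
from diffusers import UNet1DModel

def _unet1d_demo():
    unet = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2, extra_in_channels=16)
    sample = torch.randn(1, 2, 65536)
    return unet(sample, timestep=10).sample  # same (batch, channels, length) shape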
| 586
|
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class UpperCAmelCase :
"""simple docstring"""
def __init__( self ):
lowercase__: Any = {}
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=1 ):
if self.graph.get(_UpperCAmelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
lowercase__: List[Any] = [[w, v]]
if not self.graph.get(_UpperCAmelCase ):
lowercase__: Any = []
def _snake_case ( self ):
return list(self.graph )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ):
if self.graph.get(_UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase=-2 , _UpperCAmelCase=-1 ):
if s == d:
return []
lowercase__: str = []
lowercase__: Optional[Any] = []
if s == -2:
lowercase__: List[str] = list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
lowercase__: Optional[int] = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Optional[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_UpperCAmelCase ) != 0:
lowercase__: List[Any] = stack[len(_UpperCAmelCase ) - 1]
else:
lowercase__: str = ss
# check if se have reached the starting point
if len(_UpperCAmelCase ) == 0:
return visited
def _snake_case ( self , _UpperCAmelCase=-1 ):
if c == -1:
lowercase__: Union[str, Any] = floor(random() * 10000 ) + 10
for i in range(_UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowercase__: Optional[int] = floor(random() * c ) + 1
if n != i:
self.add_pair(_UpperCAmelCase , _UpperCAmelCase , 1 )
def _snake_case ( self , _UpperCAmelCase=-2 ):
lowercase__: List[str] = deque()
lowercase__: Any = []
if s == -2:
lowercase__: List[str] = list(self.graph )[0]
d.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
while d:
lowercase__: List[Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: int = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _snake_case ( self , _UpperCAmelCase ):
return len(self.graph[u] )
def _snake_case ( self , _UpperCAmelCase=-2 ):
lowercase__: Dict = []
lowercase__: int = []
if s == -2:
lowercase__: int = list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
lowercase__: Optional[int] = s
lowercase__: Any = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: List[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(_UpperCAmelCase ) != 0:
lowercase__: str = stack[len(_UpperCAmelCase ) - 1]
else:
lowercase__: Optional[Any] = ss
# check if se have reached the starting point
if len(_UpperCAmelCase ) == 0:
return sorted_nodes
def _snake_case ( self ):
lowercase__: Optional[int] = []
lowercase__: str = []
lowercase__: Union[str, Any] = list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
lowercase__: Optional[int] = -2
lowercase__: Optional[int] = []
lowercase__: Optional[Any] = s
lowercase__: List[str] = False
lowercase__: Dict = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__: Any = len(_UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: int = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: List[str] = True
if len(_UpperCAmelCase ) != 0:
lowercase__: str = stack[len(_UpperCAmelCase ) - 1]
else:
lowercase__: Optional[Any] = False
indirect_parents.append(_UpperCAmelCase )
lowercase__: Union[str, Any] = s
lowercase__: List[Any] = ss
# check if se have reached the starting point
if len(_UpperCAmelCase ) == 0:
return list(_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Union[str, Any] = []
lowercase__: List[str] = []
lowercase__: str = list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
lowercase__: Optional[int] = -2
lowercase__: Tuple = []
lowercase__: Optional[Any] = s
lowercase__: Dict = False
lowercase__: int = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__: Optional[int] = len(_UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: List[str] = True
if len(_UpperCAmelCase ) != 0:
lowercase__: Union[str, Any] = stack[len(_UpperCAmelCase ) - 1]
else:
lowercase__: Dict = False
indirect_parents.append(_UpperCAmelCase )
lowercase__: int = s
lowercase__: List[str] = ss
            # check if we have reached the starting point
if len(_UpperCAmelCase ) == 0:
return False
def _snake_case ( self , _UpperCAmelCase=-2 , _UpperCAmelCase=-1 ):
lowercase__: str = time()
self.dfs(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = time()
return end - begin
def _snake_case ( self , _UpperCAmelCase=-2 ):
lowercase__: List[Any] = time()
self.bfs(_UpperCAmelCase )
lowercase__: Any = time()
return end - begin
class UpperCAmelCase :
"""simple docstring"""
def __init__( self ):
lowercase__: Union[str, Any] = {}
    def add_pair( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=1 ):
# check if the u exists
if self.graph.get(_UpperCAmelCase ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
lowercase__: Dict = [[w, v]]
# add the other way
if self.graph.get(_UpperCAmelCase ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
lowercase__: List[Any] = [[w, u]]
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ):
if self.graph.get(_UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_UpperCAmelCase )
# the other way round
if self.graph.get(_UpperCAmelCase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(_UpperCAmelCase )
    def dfs( self , _UpperCAmelCase=-2 , _UpperCAmelCase=-1 ):
if s == d:
return []
lowercase__: List[str] = []
lowercase__: List[Any] = []
if s == -2:
lowercase__: Optional[Any] = list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
lowercase__: Any = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_UpperCAmelCase ) != 0:
lowercase__: Dict = stack[len(_UpperCAmelCase ) - 1]
else:
lowercase__: str = ss
            # check if we have reached the starting point
if len(_UpperCAmelCase ) == 0:
return visited
def _snake_case ( self , _UpperCAmelCase=-1 ):
if c == -1:
            c = floor(random() * 10000 ) + 10
for i in range(_UpperCAmelCase ):
            # every vertex has at most 102 edges
for _ in range(floor(random() * 102 ) + 1 ):
                n = floor(random() * c ) + 1
if n != i:
self.add_pair(_UpperCAmelCase , _UpperCAmelCase , 1 )
    def bfs( self , _UpperCAmelCase=-2 ):
lowercase__: Optional[int] = deque()
lowercase__: Optional[int] = []
if s == -2:
lowercase__: Optional[int] = list(self.graph )[0]
d.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
while d:
lowercase__: Tuple = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _snake_case ( self , _UpperCAmelCase ):
return len(self.graph[u] )
def _snake_case ( self ):
lowercase__: Dict = []
lowercase__: Optional[Any] = []
lowercase__: Union[str, Any] = list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
lowercase__: Union[str, Any] = -2
lowercase__: Dict = []
lowercase__: str = s
lowercase__: Tuple = False
lowercase__: Any = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: List[str] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
                        len_stack = len(stack ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: Optional[int] = True
if len(_UpperCAmelCase ) != 0:
lowercase__: Optional[Any] = stack[len(_UpperCAmelCase ) - 1]
else:
lowercase__: List[Any] = False
indirect_parents.append(_UpperCAmelCase )
lowercase__: List[Any] = s
lowercase__: Union[str, Any] = ss
            # check if we have reached the starting point
if len(_UpperCAmelCase ) == 0:
return list(_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: str = []
lowercase__: List[str] = []
lowercase__: str = list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
lowercase__: Optional[Any] = -2
lowercase__: List[str] = []
lowercase__: List[str] = s
lowercase__: str = False
lowercase__: List[str] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
                        len_stack_minus_one = len(stack ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: int = True
if len(_UpperCAmelCase ) != 0:
lowercase__: List[Any] = stack[len(_UpperCAmelCase ) - 1]
else:
lowercase__: int = False
indirect_parents.append(_UpperCAmelCase )
lowercase__: Union[str, Any] = s
lowercase__: List[Any] = ss
            # check if we have reached the starting point
if len(_UpperCAmelCase ) == 0:
return False
def _snake_case ( self ):
return list(self.graph )
def _snake_case ( self , _UpperCAmelCase=-2 , _UpperCAmelCase=-1 ):
lowercase__: Optional[Any] = time()
self.dfs(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[int] = time()
return end - begin
def _snake_case ( self , _UpperCAmelCase=-2 ):
lowercase__: List[Any] = time()
self.bfs(_UpperCAmelCase )
lowercase__: int = time()
return end - begin
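
The two classes above implement DFS, BFS, topological sort, and DFS-based cycle detection for directed and undirected weighted graphs, but the identifiers are machine-mangled. The sketch below restates the core cycle check for a directed graph with readable names; everything in it (function name, WHITE/GRAY/BLACK colouring) is illustrative, not taken from the file above.

from collections import defaultdict

def has_cycle(edges):
    """Return True if the directed graph given as (u, v) pairs contains a cycle."""
    graph = defaultdict(list)
    for u, v in edges:
        graph[u].append(v)
    WHITE, GRAY, BLACK = 0, 1, 2  # unvisited / on the current DFS path / finished
    color = defaultdict(int)

    def visit(node):
        color[node] = GRAY
        for nxt in graph[node]:
            if color[nxt] == GRAY:  # back edge into the current path -> cycle
                return True
            if color[nxt] == WHITE and visit(nxt):
                return True
        color[node] = BLACK
        return False

    return any(color[n] == WHITE and visit(n) for n in list(graph))

assert has_cycle([(1, 2), (2, 3), (3, 1)]) is True
assert has_cycle([(1, 2), (2, 3)]) is False
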
| 586
| 1
|
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
snake_case = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCAmelCase ( datasets.BuilderConfig ):
A__ : Optional[datasets.Features] = None
A__ : str = "utf-8"
A__ : Optional[str] = None
A__ : Optional[str] = None
A__ : bool = True # deprecated
A__ : Optional[int] = None # deprecated
A__ : int = 10 << 20 # 10MB
A__ : Optional[bool] = None
class UpperCAmelCase ( datasets.ArrowBasedBuilder ):
A__ : Tuple = JsonConfig
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
_snake_case = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_snake_case = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__lowerCamelCase , (str, list, tuple) ):
            files = data_files
            if isinstance(__lowerCamelCase , __lowerCamelCase ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        splits = []
for split_name, files in data_files.items():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
                files = [files]
                files = [dl_manager.iter_files(file ) for file in files]
splits.append(datasets.SplitGenerator(name=__lowerCamelCase , gen_kwargs={'''files''': files} ) )
return splits
    def _cast_table( self : int , __lowerCamelCase : pa.Table ):
"""simple docstring"""
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
_snake_case = self.config.features.arrow_schema.field(__lowerCamelCase ).type
_snake_case = pa_table.append_column(__lowerCamelCase , pa.array([None] * len(__lowerCamelCase ) , type=__lowerCamelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
_snake_case = table_cast(__lowerCamelCase , self.config.features.arrow_schema )
return pa_table
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Tuple ):
"""simple docstring"""
for file_idx, file in enumerate(itertools.chain.from_iterable(__lowerCamelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(__lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
_snake_case = json.load(__lowerCamelCase )
# We keep only the field we are interested in
_snake_case = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
if isinstance(__lowerCamelCase , (list, tuple) ):
_snake_case = set().union(*[row.keys() for row in dataset] )
_snake_case = {col: [row.get(__lowerCamelCase ) for row in dataset] for col in keys}
else:
_snake_case = dataset
_snake_case = pa.Table.from_pydict(__lowerCamelCase )
yield file_idx, self._cast_table(__lowerCamelCase )
# If the file has one json object per line
else:
with open(__lowerCamelCase , '''rb''' ) as f:
                    batch_idx = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32 , 16 << 10 )
_snake_case = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
_snake_case = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(__lowerCamelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
_snake_case = batch.decode(self.config.encoding , errors=__lowerCamelCase ).encode('''utf-8''' )
try:
while True:
try:
_snake_case = paj.read_json(
                                        io.BytesIO(batch ) , read_options=paj.ReadOptions(block_size=block_size ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
                                        isinstance(e , pa.ArrowInvalid )
                                        and "straddling" not in str(e )
                                        or block_size > len(batch )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f"""Batch of {len(__lowerCamelCase )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
__lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
_snake_case = json.load(__lowerCamelCase )
except json.JSONDecodeError:
logger.error(f"""Failed to read file '{file}' with error {type(__lowerCamelCase )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(__lowerCamelCase , __lowerCamelCase ): # list is the only sequence type supported in JSON
try:
_snake_case = set().union(*[row.keys() for row in dataset] )
_snake_case = {col: [row.get(__lowerCamelCase ) for row in dataset] for col in keys}
_snake_case = pa.Table.from_pydict(__lowerCamelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f"""Failed to read file '{file}' with error {type(__lowerCamelCase )}: {e}""" )
raise ValueError(f"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(__lowerCamelCase )
break
else:
logger.error(f"""Failed to read file '{file}' with error {type(__lowerCamelCase )}: {e}""" )
raise ValueError(
f"""Not able to read records in the JSON file at {file}. """
f"""You should probably indicate the field of the JSON file containing your records. """
f"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__lowerCamelCase )
batch_idx += 1
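
The chunked JSON Lines path above is the interesting part of this builder: read a chunk, complete the current line, then let pyarrow parse it, doubling the parser's block size whenever a single JSON object straddles a block boundary. Below is a standalone sketch of that retry loop; the function name and defaults are mine, while the pyarrow calls are real.

import io
import pyarrow as pa
import pyarrow.json as paj

def parse_jsonl_chunk(raw: bytes, block_size: int = 16 << 10) -> pa.Table:
    while True:
        try:
            return paj.read_json(
                io.BytesIO(raw), read_options=paj.ReadOptions(block_size=block_size)
            )
        except pa.ArrowInvalid:
            if block_size > len(raw):  # growing the block further cannot help
                raise
            block_size *= 2  # an object probably straddled a block boundary

table = parse_jsonl_chunk(b'{"a": 1}\n{"a": 2}\n')
assert table.num_rows == 2
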
| 404
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def create_rename_keys( lowerCAmelCase_ , lowerCAmelCase_=False ) -> str:
_snake_case = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_snake_case = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> int:
for i in range(config.num_hidden_layers ):
if base_model:
_snake_case = ''''''
else:
_snake_case = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_snake_case = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
_snake_case = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_snake_case = in_proj_weight[
: config.hidden_size, :
]
_snake_case = in_proj_bias[: config.hidden_size]
_snake_case = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_snake_case = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_snake_case = in_proj_weight[
-config.hidden_size :, :
]
_snake_case = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( lowerCAmelCase_ ) -> Any:
_snake_case = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(lowerCAmelCase_ , lowerCAmelCase_ )
def rename_key( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
_snake_case = dct.pop(lowerCAmelCase_ )
_snake_case = val
def prepare_img( ) -> List[Any]:
_snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_snake_case = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def convert_vit_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True ) -> Any:
_snake_case = ViTConfig()
# patch_size
if model_name[-1] == "8":
_snake_case = 8
# set labels if required
if not base_model:
_snake_case = 1000
_snake_case = '''huggingface/label-files'''
_snake_case = '''imagenet-1k-id2label.json'''
_snake_case = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type='''dataset''' ) , '''r''' ) )
        _snake_case = {int(k ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_snake_case = 384
_snake_case = 1536
_snake_case = 12
_snake_case = 6
# load original model from torch hub
_snake_case = torch.hub.load('''facebookresearch/dino:main''' , lowerCAmelCase_ )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_snake_case = original_model.state_dict()
if base_model:
remove_classification_head_(lowerCAmelCase_ )
_snake_case = create_rename_keys(lowerCAmelCase_ , base_model=lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# load HuggingFace model
if base_model:
_snake_case = ViTModel(lowerCAmelCase_ , add_pooling_layer=lowerCAmelCase_ ).eval()
else:
_snake_case = ViTForImageClassification(lowerCAmelCase_ ).eval()
model.load_state_dict(lowerCAmelCase_ )
# Check outputs on an image, prepared by ViTImageProcessor
_snake_case = ViTImageProcessor()
_snake_case = image_processor(images=prepare_img() , return_tensors='''pt''' )
_snake_case = encoding['''pixel_values''']
_snake_case = model(lowerCAmelCase_ )
if base_model:
_snake_case = original_model(lowerCAmelCase_ )
assert torch.allclose(lowerCAmelCase_ , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
_snake_case = original_model(lowerCAmelCase_ )
assert logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase_ , outputs.logits , atol=1E-3 )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase_ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
snake_case = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
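
The trickiest step in this conversion is read_in_q_k_v: timm stores the attention projections as one fused (3 * hidden, hidden) qkv matrix, while the HF model wants separate query/key/value weights. The same split on dummy tensors, shapes only; variable names are illustrative.

import torch

hidden_size = 768
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # q, k, v stacked row-wise
in_proj_bias = torch.randn(3 * hidden_size)

q_w, k_w, v_w = in_proj_weight.split(hidden_size, dim=0)
q_b, k_b, v_b = in_proj_bias.split(hidden_size, dim=0)

assert q_w.shape == (hidden_size, hidden_size)
assert v_b.shape == (hidden_size,)
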
| 404
| 1
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line( __lowercase : Tuple ,__lowercase : Optional[int] ,__lowercase : Any ,__lowercase : List[str] ,__lowercase : List[Any]=True ,__lowercase : Tuple="pt" ):
'''simple docstring'''
A_ : Dict = {'add_prefix_space': True} if isinstance(__lowercase ,__lowercase ) and not line.startswith(' ' ) else {}
A_ : int = padding_side
return tokenizer(
[line] ,max_length=__lowercase ,padding='max_length' if pad_to_max_length else None ,truncation=__lowercase ,return_tensors=__lowercase ,add_special_tokens=__lowercase ,**__lowercase ,)
def trim_batch( __lowercase : int ,__lowercase : int ,__lowercase : Any=None ,):
'''simple docstring'''
A_ : Optional[int] = input_ids.ne(__lowercase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase="train" , lowercase=None , lowercase=None , lowercase=None , lowercase="" , ):
"""simple docstring"""
super().__init__()
A_ : List[Any] = Path(_UpperCAmelCase ).joinpath(type_path + '.source' )
A_ : Any = Path(_UpperCAmelCase ).joinpath(type_path + '.target' )
A_ : List[Any] = self.get_char_lens(self.src_file )
A_ : Tuple = max_source_length
A_ : List[Any] = max_target_length
assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
A_ : Optional[Any] = tokenizer
A_ : List[str] = prefix
if n_obs is not None:
A_ : List[Any] = self.src_lens[:n_obs]
A_ : List[str] = src_lang
A_ : List[str] = tgt_lang
def __len__( self ):
"""simple docstring"""
return len(self.src_lens )
def __getitem__( self , lowercase ):
"""simple docstring"""
A_ : int = index + 1 # linecache starts at 1
A_ : Tuple = self.prefix + linecache.getline(str(self.src_file ) , _UpperCAmelCase ).rstrip('\n' )
A_ : Tuple = linecache.getline(str(self.tgt_file ) , _UpperCAmelCase ).rstrip('\n' )
assert source_line, F'''empty source line for index {index}'''
assert tgt_line, F'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer , _UpperCAmelCase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
A_ : Optional[Any] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , _UpperCAmelCase ) else self.tokenizer
)
A_ : Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer , _UpperCAmelCase ) else self.tokenizer
A_ : Dict = encode_line(_UpperCAmelCase , _UpperCAmelCase , self.max_source_length , 'right' )
A_ : Tuple = encode_line(_UpperCAmelCase , _UpperCAmelCase , self.max_target_length , 'right' )
A_ : List[Any] = source_inputs['input_ids'].squeeze()
A_ : str = target_inputs['input_ids'].squeeze()
A_ : Optional[int] = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
return [len(_UpperCAmelCase ) for x in Path(_UpperCAmelCase ).open().readlines()]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : str = torch.stack([x['input_ids'] for x in batch] )
A_ : Optional[Any] = torch.stack([x['attention_mask'] for x in batch] )
A_ : str = torch.stack([x['decoder_input_ids'] for x in batch] )
A_ : Optional[int] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , _UpperCAmelCase )
else self.tokenizer.pad_token_id
)
A_ : List[str] = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , _UpperCAmelCase )
else self.tokenizer.pad_token_id
)
        y = trim_batch(_UpperCAmelCase , _UpperCAmelCase )
        source_ids, source_mask = trim_batch(_UpperCAmelCase , _UpperCAmelCase , attention_mask=_UpperCAmelCase )
A_ : List[Any] = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
_UpperCAmelCase = getLogger(__name__)
def UpperCamelCase ( __lowercase : List[List] ):
'''simple docstring'''
return list(itertools.chain.from_iterable(__lowercase ) )
def UpperCamelCase ( __lowercase : str ):
'''simple docstring'''
A_ : Any = get_git_info()
save_json(__lowercase ,os.path.join(__lowercase ,'git_log.json' ) )
def save_json( __lowercase : Optional[Any] ,__lowercase : Tuple ,__lowercase : Dict=4 ,**__lowercase : List[str] ):
'''simple docstring'''
with open(__lowercase ,'w' ) as f:
json.dump(__lowercase ,__lowercase ,indent=__lowercase ,**__lowercase )
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
with open(__lowercase ) as f:
return json.load(__lowercase )
def get_git_info( ):
'''simple docstring'''
A_ : int = git.Repo(search_parent_directories=__lowercase )
A_ : Optional[Any] = {
'repo_id': str(__lowercase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def UpperCamelCase ( __lowercase : Callable ,__lowercase : Iterable ):
'''simple docstring'''
return list(map(__lowercase ,__lowercase ) )
def UpperCamelCase ( __lowercase : int ,__lowercase : str ):
'''simple docstring'''
with open(__lowercase ,'wb' ) as f:
return pickle.dump(__lowercase ,__lowercase )
def normalize_answer( __lowercase : str ):
'''simple docstring'''
def remove_articles(__lowercase : Optional[int] ):
return re.sub(r'\b(a|an|the)\b' ,' ' ,__lowercase )
def white_space_fix(__lowercase : Optional[int] ):
return " ".join(text.split() )
def remove_punc(__lowercase : Tuple ):
A_ : Optional[int] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowercase : Tuple ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowercase ) ) ) )
def UpperCamelCase ( __lowercase : int ,__lowercase : List[Any] ):
'''simple docstring'''
A_ : Tuple = normalize_answer(__lowercase ).split()
A_ : Optional[int] = normalize_answer(__lowercase ).split()
A_ : Optional[Any] = Counter(__lowercase ) & Counter(__lowercase )
A_ : Any = sum(common.values() )
if num_same == 0:
return 0
A_ : Dict = 1.0 * num_same / len(__lowercase )
A_ : Union[str, Any] = 1.0 * num_same / len(__lowercase )
A_ : Dict = (2 * precision * recall) / (precision + recall)
return fa
def exact_match_score( __lowercase : int ,__lowercase : str ):
'''simple docstring'''
return normalize_answer(__lowercase ) == normalize_answer(__lowercase )
def UpperCamelCase ( __lowercase : List[str] ,__lowercase : List[str] ):
'''simple docstring'''
assert len(__lowercase ) == len(__lowercase )
A_ : List[str] = 0
for hypo, pred in zip(__lowercase ,__lowercase ):
em += exact_match_score(__lowercase ,__lowercase )
if len(__lowercase ) > 0:
em /= len(__lowercase )
return {"em": em}
def UpperCamelCase ( __lowercase : List[str] ):
'''simple docstring'''
return model_prefix.startswith('rag' )
def UpperCamelCase ( __lowercase : Union[str, Any] ,__lowercase : int ,__lowercase : int ):
'''simple docstring'''
A_ : Optional[int] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
A_ : Optional[Any] = 'dropout_rate'
for p in extra_params:
if getattr(__lowercase ,__lowercase ,__lowercase ):
if not hasattr(__lowercase ,__lowercase ) and not hasattr(__lowercase ,equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(__lowercase ) )
delattr(__lowercase ,__lowercase )
continue
A_ : List[Any] = p if hasattr(__lowercase ,__lowercase ) else equivalent_param[p]
setattr(__lowercase ,__lowercase ,getattr(__lowercase ,__lowercase ) )
delattr(__lowercase ,__lowercase )
return hparams, config
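
A detail worth isolating from the collate function above: trim_batch drops every padding column that is all-pad across the whole batch, so each batch is only as wide as its longest example. A self-contained sketch with names of my own choosing:

import torch

def trim_batch(input_ids: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    keep = input_ids.ne(pad_token_id).any(dim=0)  # column holds at least one real token
    return input_ids[:, keep]

batch = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
assert trim_batch(batch, pad_token_id=0).shape == (2, 2)
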
| 558
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A : Optional[Any] = 1_6
A : str = 3_2
def get_dataloaders( __magic_name__ : Accelerator , __magic_name__ : int = 16 ) -> List[str]:
"""simple docstring"""
lowercase__ = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowercase__ = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__magic_name__ : int ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__magic_name__ , max_length=__magic_name__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ = datasets.map(
__magic_name__ , batched=__magic_name__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__magic_name__ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ = 16
elif accelerator.mixed_precision != "no":
lowercase__ = 8
else:
lowercase__ = None
return tokenizer.pad(
__magic_name__ , padding="""longest""" , max_length=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets["""train"""] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
lowercase__ = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A : Union[str, Any] = mocked_dataloaders # noqa: F811
def training_function( __magic_name__ : Optional[Any] , __magic_name__ : List[Any] ) -> Dict:
"""simple docstring"""
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __magic_name__ ) == "1":
lowercase__ = 2
# New Code #
lowercase__ = int(args.gradient_accumulation_steps )
# Initialize accelerator
lowercase__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__magic_name__ )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"""Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config["""lr"""]
lowercase__ = int(config["""num_epochs"""] )
lowercase__ = int(config["""seed"""] )
lowercase__ = int(config["""batch_size"""] )
lowercase__ = evaluate.load("""glue""" , """mrpc""" )
set_seed(__magic_name__ )
lowercase__ , lowercase__ = get_dataloaders(__magic_name__ , __magic_name__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__magic_name__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ = AdamW(params=model.parameters() , lr=__magic_name__ )
# Instantiate scheduler
lowercase__ = get_linear_schedule_with_warmup(
optimizer=__magic_name__ , num_warmup_steps=100 , num_training_steps=(len(__magic_name__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# Now we train the model
for epoch in range(__magic_name__ ):
model.train()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs, nor do we advise using them here, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__magic_name__ ):
lowercase__ = model(**__magic_name__ )
lowercase__ = output.loss
accelerator.backward(__magic_name__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**__magic_name__ )
lowercase__ = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=__magic_name__ , references=__magic_name__ , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , __magic_name__ )
def main( ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=__magic_name__ , default=__magic_name__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__magic_name__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowercase__ = parser.parse_args()
lowercase__ = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(__magic_name__ , __magic_name__ )
if __name__ == "__main__":
main()
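
Stripped of the Accelerate plumbing, gradient accumulation is just "divide the loss by the window size, call backward every minibatch, and step the optimizer once per window". A plain-PyTorch sketch of what accelerator.accumulate() arranges, illustrative rather than the Accelerate implementation:

import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
accumulation_steps = 4

for step in range(16):
    x, y = torch.randn(8, 4), torch.randn(8, 1)
    loss = torch.nn.functional.mse_loss(model(x), y) / accumulation_steps  # average over the window
    loss.backward()  # gradients add up across the window
    if (step + 1) % accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()
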
| 15
| 0
|
'''simple docstring'''
__magic_name__ : List[Any] = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
_snake_case = Stack()
_snake_case = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(SCREAMING_SNAKE_CASE__ ) )
elif i in operators:
# RULE 2
operator_stack.push(SCREAMING_SNAKE_CASE__ )
elif i == ")":
# RULE 4
_snake_case = operator_stack.peek()
operator_stack.pop()
_snake_case = operand_stack.peek()
operand_stack.pop()
_snake_case = operand_stack.peek()
operand_stack.pop()
_snake_case = operators[opr](SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
operand_stack.push(SCREAMING_SNAKE_CASE__ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
__magic_name__ : int = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'{equation} = {dijkstras_two_stack_algorithm(equation)}')
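
The same two-stack evaluation with plain lists and readable names, to make the mangled version above easier to follow. This is illustrative; like the original, it assumes a fully parenthesized expression with single-digit operands.

import operator as op

def evaluate_fully_parenthesized(expression: str):
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operands, pending = [], []
    for token in expression:
        if token.isdigit():
            operands.append(int(token))          # RULE 1
        elif token in operators:
            pending.append(token)                # RULE 2
        elif token == ")":
            right = operands.pop()               # RULE 4: last pushed is the right operand
            left = operands.pop()
            operands.append(operators[pending.pop()](left, right))
    return operands[-1]                          # RULE 5

assert evaluate_fully_parenthesized("(5 + ((4 * 2) * (2 + 3)))") == 45
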
| 368
|
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
__magic_name__ : str = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
__magic_name__ : List[Any] = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
_snake_case = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
_snake_case = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(SCREAMING_SNAKE_CASE__ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(SCREAMING_SNAKE_CASE__ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(SCREAMING_SNAKE_CASE__ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
_snake_case = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(SCREAMING_SNAKE_CASE__ )
return next_generation
def generate_images( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = []
for _ in range(SCREAMING_SNAKE_CASE__ ):
# Create output image
_snake_case = Image.new("RGB" , (len(cells[0] ), len(SCREAMING_SNAKE_CASE__ )) )
_snake_case = img.load()
# Save cells to image
for x in range(len(SCREAMING_SNAKE_CASE__ ) ):
for y in range(len(cells[0] ) ):
_snake_case = 2_55 - cells[y][x] * 2_55
_snake_case = (colour, colour, colour)
# Save image
images.append(SCREAMING_SNAKE_CASE__ )
_snake_case = new_generation(SCREAMING_SNAKE_CASE__ )
return images
if __name__ == "__main__":
__magic_name__ : Optional[Any] = generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:])
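
The eight explicit neighbour checks in new_generation can be collapsed into one bounds-clamped double loop. An equivalent, illustrative helper:

def live_neighbours(cells: list[list[int]], i: int, j: int) -> int:
    total = 0
    for r in range(max(i - 1, 0), min(i + 2, len(cells))):
        for c in range(max(j - 1, 0), min(j + 2, len(cells[0]))):
            total += cells[r][c]
    return total - cells[i][j]  # the cell itself is not its own neighbour

BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
assert live_neighbours(BLINKER, 1, 1) == 2
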
| 368
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
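
The _LazyModule pattern above defers every heavy submodule import until an attribute is first accessed, so importing the package stays cheap. The mechanism in miniature, via a PEP 562 module-level __getattr__; this is an illustrative sketch, not the transformers implementation:

import importlib

_import_structure = {"json": ["dumps", "loads"]}  # submodule -> names it exports

def __getattr__(name):
    for module_name, exported in _import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
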
| 513
|
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class __SCREAMING_SNAKE_CASE ( _a ):
snake_case : Optional[torch.FloatTensor] = None
snake_case : torch.FloatTensor = None
snake_case : Optional[Tuple[torch.FloatTensor]] = None
snake_case : Optional[Tuple[torch.FloatTensor]] = None
class __SCREAMING_SNAKE_CASE ( _a ):
def __init__( self , __lowerCAmelCase=1 , __lowerCAmelCase=0 , __lowerCAmelCase=2 , __lowerCAmelCase=512 , __lowerCAmelCase="cls" , __lowerCAmelCase=False , __lowerCAmelCase=True , **__lowerCAmelCase , ):
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
UpperCamelCase__ = project_dim
UpperCamelCase__ = pooler_fn
UpperCamelCase__ = learn_encoder
UpperCamelCase__ = use_attention_mask
class __SCREAMING_SNAKE_CASE ( _a ):
snake_case : int = [r"""pooler""", r"""logit_scale"""]
snake_case : Tuple = [r"""position_ids""", r"""predictions.decoder.bias"""]
snake_case : str = """roberta"""
snake_case : Dict = RobertaSeriesConfig
def __init__( self , __lowerCAmelCase ):
super().__init__(__lowerCAmelCase )
UpperCamelCase__ = XLMRobertaModel(__lowerCAmelCase )
UpperCamelCase__ = nn.Linear(config.hidden_size , config.project_dim )
UpperCamelCase__ = getattr(__lowerCAmelCase , """has_pre_transformation""" , __lowerCAmelCase )
if self.has_pre_transformation:
UpperCamelCase__ = nn.Linear(config.hidden_size , config.project_dim )
UpperCamelCase__ = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def _lowerCamelCase ( self , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , ):
UpperCamelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase__ = self.base_model(
input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , position_ids=__lowerCAmelCase , head_mask=__lowerCAmelCase , inputs_embeds=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , output_attentions=__lowerCAmelCase , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=__lowerCAmelCase , )
if self.has_pre_transformation:
UpperCamelCase__ = outputs["""hidden_states"""][-2]
UpperCamelCase__ = self.pre_LN(__lowerCAmelCase )
UpperCamelCase__ = self.transformation_pre(__lowerCAmelCase )
return TransformationModelOutput(
projection_state=__lowerCAmelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
UpperCamelCase__ = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=__lowerCAmelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
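
Functionally, the model above is a text encoder plus a projection head: optionally layer-normalize a late hidden state, then map it from the encoder width to the target embedding width. The head in isolation, with illustrative dimensions and names:

import torch
from torch import nn

hidden_size, project_dim = 768, 1024
pre_ln = nn.LayerNorm(hidden_size)
projection = nn.Linear(hidden_size, project_dim)

hidden_state = torch.randn(2, 16, hidden_size)  # (batch, sequence, hidden)
projected = projection(pre_ln(hidden_state))
assert projected.shape == (2, 16, project_dim)
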
| 619
| 0
|
def _UpperCAmelCase ( _UpperCamelCase : str ) -> int:
if bit_count < 0:
        raise ValueError('''The given input must be non-negative''' )
# get the generated string sequence
A_ = gray_code_sequence_string(_lowercase )
    # convert the generated strings to integers
for i in range(len(_lowercase ) ):
A_ = int(sequence[i], 2 )
return sequence
def gray_code_sequence_string( _UpperCamelCase : List[str] ) -> Tuple:
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
A_ = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
A_ = gray_code_sequence_string(bit_count - 1 )
A_ = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
A_ = '''0''' + smaller_sequence[i]
sequence.append(_lowercase )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
A_ = '''1''' + smaller_sequence[i]
sequence.append(_lowercase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
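
The recursive string construction above (prefix "0" to one half, "1" to the reversed half) is the classic reflect-and-prefix definition. The same sequence also falls out of the closed form gray(n) = n XOR (n >> 1), which is handy as a cross-check:

def gray_code_closed_form(bit_count: int) -> list[int]:
    return [n ^ (n >> 1) for n in range(1 << bit_count)]

assert gray_code_closed_form(2) == [0, 1, 3, 2]
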
| 718
|
'''simple docstring'''
from __future__ import annotations
__snake_case : str = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None:
A_ = graph
# mapping node to its parent in resulting breadth first tree
A_ = {}
A_ = source_vertex
    def breath_first_search( self ) -> None:
A_ = {self.source_vertex}
A_ = None
A_ = [self.source_vertex] # first in first out queue
while queue:
A_ = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(_SCREAMING_SNAKE_CASE )
A_ = vertex
queue.append(_SCREAMING_SNAKE_CASE )
    def shortest_path( self , _SCREAMING_SNAKE_CASE ) -> str:
if target_vertex == self.source_vertex:
return self.source_vertex
A_ = self.parent.get(_SCREAMING_SNAKE_CASE )
if target_vertex_parent is None:
A_ = (
F'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
)
raise ValueError(_SCREAMING_SNAKE_CASE )
return self.shortest_path(_SCREAMING_SNAKE_CASE ) + F'''->{target_vertex}'''
if __name__ == "__main__":
__snake_case : List[Any] = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
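
The recursive shortest_path above walks the parent map from target back to source. The same reconstruction written iteratively, which avoids recursion depth limits on long paths; names here are illustrative:

def reconstruct_path(parent: dict[str, str], source: str, target: str) -> list[str]:
    path = [target]
    while path[-1] != source:
        if path[-1] not in parent:
            raise ValueError(f"No path from vertex: {source} to vertex: {target}")
        path.append(parent[path[-1]])
    return path[::-1]

assert reconstruct_path({"B": "A", "D": "B"}, "A", "D") == ["A", "B", "D"]
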
| 174
| 0
|
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
lowerCamelCase = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowerCamelCase = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
lowerCamelCase = 'zero2'
lowerCamelCase = 'zero3'
lowerCamelCase = [ZEROa, ZEROa]
def a_ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
    _lowerCamelCase : Tuple =parameterized.to_safe_name('_'.join(str(x ) for x in param.args ) )
return F'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
lowerCamelCase = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class A ( UpperCamelCase_ ):
@parameterized.expand(lowercase_ , name_func=lowercase_ )
def lowerCamelCase ( self : str , lowercase_ : Optional[int] , lowercase_ : str ) -> Union[str, Any]:
"""simple docstring"""
self.run_and_check(
stage=lowercase_ , model=lowercase_ , distributed=lowercase_ , fpaa=lowercase_ , )
@require_torch_multi_gpu
@parameterized.expand(lowercase_ , name_func=lowercase_ )
def lowerCamelCase ( self : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : str ) -> Optional[int]:
"""simple docstring"""
self.run_and_check(
stage=lowercase_ , model=lowercase_ , distributed=lowercase_ , fpaa=lowercase_ , )
@parameterized.expand(lowercase_ , name_func=lowercase_ )
def lowerCamelCase ( self : Union[str, Any] , lowercase_ : int , lowercase_ : str ) -> Any:
"""simple docstring"""
self.run_and_check(
stage=lowercase_ , model=lowercase_ , distributed=lowercase_ , fpaa=lowercase_ , )
@require_torch_multi_gpu
@parameterized.expand(lowercase_ , name_func=lowercase_ )
def lowerCamelCase ( self : Optional[int] , lowercase_ : Any , lowercase_ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self.run_and_check(
stage=lowercase_ , model=lowercase_ , distributed=lowercase_ , fpaa=lowercase_ , )
    def do_checks( self : Any , lowercase_ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
pass
    def run_and_check( self : str , lowercase_ : str , lowercase_ : str , lowercase_ : int = 10 , lowercase_ : bool = True , lowercase_ : bool = True , lowercase_ : bool = True , ) -> Dict:
"""simple docstring"""
_lowerCamelCase : Optional[Any] =models[model]
_lowerCamelCase : str =self.run_trainer(
stage=lowercase_ , model_name=lowercase_ , eval_steps=lowercase_ , num_train_epochs=1 , distributed=lowercase_ , fpaa=lowercase_ , )
self.do_checks(lowercase_ )
return output_dir
    def run_trainer( self : Tuple , lowercase_ : str , lowercase_ : str , lowercase_ : int = 10 , lowercase_ : int = 1 , lowercase_ : bool = True , lowercase_ : bool = True , ) -> List[str]:
"""simple docstring"""
_lowerCamelCase : Dict =self.get_auto_remove_tmp_dir('./xxx' , after=lowercase_ )
_lowerCamelCase : Any =F'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(lowercase_ )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
'''.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_lowerCamelCase : Optional[int] =F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
_lowerCamelCase : Any =[F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
_lowerCamelCase : Tuple =self.get_launcher(lowercase_ )
_lowerCamelCase : Tuple =launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowercase_ , env=self.get_env() )
return output_dir
    def get_launcher( self : List[Any] , lowercase_ : List[Any]=False ) -> Any:
"""simple docstring"""
_lowerCamelCase : str =min(2 , get_gpu_count() ) if distributed else 1
return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
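
run_trainer above builds its command as launcher + script + script args + deepspeed args and hands the list to execute_subprocess_async. A sketch of that assembly in isolation; paths and flags here are illustrative:

def build_cmd(stage: str, num_gpus: int, script: str, output_dir: str) -> list[str]:
    launcher = f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
    script_args = f"--output_dir {output_dir} --fp16".split()
    ds_args = f"--deepspeed ds_config_wav2vec2_{stage}.json".split()
    return launcher + [script] + script_args + ds_args

cmd = build_cmd("zero2", 2, "run_asr.py", "/tmp/out")
assert cmd[0] == "deepspeed" and "--fp16" in cmd
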
| 464
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class A ( UpperCamelCase_ ):
UpperCamelCase__ : Union[str, Any] ='vit_msn'
def __init__( self : Union[str, Any] , lowercase_ : List[str]=768 , lowercase_ : Optional[int]=12 , lowercase_ : List[str]=12 , lowercase_ : List[Any]=3072 , lowercase_ : int="gelu" , lowercase_ : List[Any]=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : Dict=0.02 , lowercase_ : Any=1E-06 , lowercase_ : Union[str, Any]=224 , lowercase_ : Optional[int]=16 , lowercase_ : List[Any]=3 , lowercase_ : Any=True , **lowercase_ : Tuple , ) -> Dict:
"""simple docstring"""
super().__init__(**lowercase_ )
_lowerCamelCase : Dict =hidden_size
_lowerCamelCase : Any =num_hidden_layers
_lowerCamelCase : Union[str, Any] =num_attention_heads
_lowerCamelCase : List[Any] =intermediate_size
_lowerCamelCase : Tuple =hidden_act
_lowerCamelCase : Any =hidden_dropout_prob
_lowerCamelCase : Dict =attention_probs_dropout_prob
_lowerCamelCase : Any =initializer_range
_lowerCamelCase : List[Any] =layer_norm_eps
_lowerCamelCase : Optional[int] =image_size
_lowerCamelCase : Optional[int] =patch_size
_lowerCamelCase : List[Any] =num_channels
_lowerCamelCase : Optional[Any] =qkv_bias
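
A quick sanity check on the defaults above: a 224x224 image cut into 16x16 patches yields a 14x14 grid, so the encoder sees 196 patch tokens plus one [CLS] token.

image_size, patch_size = 224, 16
num_patches = (image_size // patch_size) ** 2
assert num_patches == 196
assert num_patches + 1 == 197  # sequence length seen by the transformer
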
| 464
| 1
|
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset ( IterableDataset ):
    """simple docstring"""

    def __init__( self , tokenizer , dataset , seq_length=1024 , num_of_sequences=1024 , chars_per_token=3.6 ) -> None:
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__( self ):
        iterator = iter(self.dataset )
        more_examples = True
        while more_examples:
            buffer , buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator )['''content'''] )
                    buffer_len += len(buffer[-1] )
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer , truncation=False )['''input_ids''']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id] )
            for i in range(0 , len(all_token_ids ) , self.seq_length ):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids ) == self.seq_length:
                    yield torch.tensor(input_ids )
def create_dataloader(args ):
    ds_kwargs = {'''streaming''': True}
    valid_data = load_dataset(args.dataset_name , split='''train''' , **ds_kwargs )
    valid_dataset = ConstantLengthDataset(tokenizer , valid_data , seq_length=args.seq_length )
    eval_dataloader = DataLoader(valid_dataset , batch_size=args.batch_size )
    return eval_dataloader
def evaluate(args ):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader ):
        with torch.no_grad():
            outputs = model(batch , labels=batch )
        loss = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(loss ) )

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses ) )
    try:
        # Perplexity is the exponential of the mean cross-entropy loss.
        perplexity = torch.exp(loss )
    except OverflowError:
        perplexity = float('''inf''' )
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
eval_loss, perplexity = evaluate(args)
logger.info(F"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
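# Note (added): `evaluate` returns the mean token-level cross-entropy and its
# exponential; since perplexity = exp(loss), doubling the loss squares the
# perplexity, and very large losses overflow to float('inf'), which the
# try/except above guards against.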
| 703
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 ):
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = load_dataset('''glue''' , '''mrpc''' )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config , args ):
    # For testing only
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , None ) == "1":
        config['''num_epochs'''] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
    else:
        accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    set_seed(seed )

    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
    metric = evaluate.load('''glue''' , '''mrpc''' )

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True )

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__ )[-1].split('''.''' )[0]
        accelerator.init_trackers(run , config )

    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
            metric.add_batch(
                predictions=predictions , references=references , )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''' , eval_metric )
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    '''accuracy''': eval_metric['''accuracy'''],
                    '''f1''': eval_metric['''f1'''],
                    '''train_loss''': total_loss.item() / len(train_dataloader ),
                    '''epoch''': epoch,
                } , step=epoch , )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main() -> None:
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    parser.add_argument(
        '''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
    parser.add_argument(
        '''--project_dir''' , type=str , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 558
| 0
|
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name ) -> MobileNetV1Config:
    config = MobileNetV1Config(layer_norm_eps=0.001 )

    if "_quant" in model_name:
        raise ValueError('Quantized models are not supported.' )

    matches = re.match(r'^mobilenet_v1_([^_]*)_([^_]*)$' , model_name )
    if matches:
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = 'imagenet-1k-id2label.json'
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ) + 1: v for k, v in id2label.items()}
    id2label[0] = 'background'
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
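# Illustration (added): how the regex above decodes checkpoint names, assuming
# they follow the upstream "mobilenet_v1_<depth>_<size>" convention:
#   "mobilenet_v1_1.0_224"  -> depth_multiplier=1.0,  image_size=224
#   "mobilenet_v1_0.75_192" -> depth_multiplier=0.75, image_size=192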
def prepare_img() -> Image.Image:
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ) -> None:
    config = get_mobilenet_v1_config(model_name )

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config ).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model , config , checkpoint_path )

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={'width': config.image_size, 'height': config.image_size} , size={'shortest_edge': config.image_size + 32} , )
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    outputs = model(**encoding )
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205] )
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333] )
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        print('Pushing to the hub...' )
        repo_id = 'google/' + model_name
        image_processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 459
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester :
    """simple docstring"""

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=128 , max_relative_position=32 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config( self ) -> NezhaConfig:
        return NezhaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder( self ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
        model = NezhaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> None:
        config.add_cross_attention = True
        model = NezhaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
        model = NezhaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_next_sequence_prediction( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
        model = NezhaForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )

    def create_and_check_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
        model = NezhaForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )

    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
        model = NezhaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": NezhaModel,
"""fill-mask""": NezhaForMaskedLM,
"""question-answering""": NezhaForQuestionAnswering,
"""text-classification""": NezhaForSequenceClassification,
"""token-classification""": NezhaForTokenClassification,
"""zero-shot""": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> dict:
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['''next_sentence_label'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ) -> None:
        self.model_tester = NezhaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=NezhaConfig , hidden_size=37 )

    def test_config( self ) -> None:
        self.config_tester.run_common_tests()

    def test_model( self ) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_as_decoder( self ) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )

    def test_model_as_decoder_with_default_input_mask( self ) -> None:
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )

    def test_for_masked_lm( self ) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice( self ) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_next_sequence_prediction( self ) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs )

    def test_for_pretraining( self ) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )

    def test_for_question_answering( self ) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ) -> None:
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@slow
@require_torch_gpu
    def test_torchscript_device_change( self ) -> None:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config )

            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , 'bert.pt' ) )
                loaded = torch.jit.load(os.path.join(tmp , 'bert.pt' ) , map_location=torch_device )
                loaded(inputs_dict['input_ids'].to(torch_device ) , inputs_dict['attention_mask'].to(torch_device ) )
@require_torch
class NezhaModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
    @slow
    def test_inference_nezha_model( self ) -> None:
        model = NezhaModel.from_pretrained('sijunhe/nezha-cn-base' )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )

    @slow
    def test_inference_nezha_masked_lm( self ) -> None:
        model = NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base' )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 21128) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
| 459
| 1
|
'''simple docstring'''
def generate_large_matrix() -> list[list[int]]:
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]] ) -> None:
    # Validate that the rows and columns of the grid are sorted in decreasing order.
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index(array: list[int] ) -> int:
    left = 0
    right = len(array ) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array )
def count_negatives_binary_search(grid: list[list[int]] ) -> int:
    total = 0
    bound = len(grid[0] )

    for i in range(len(grid ) ):
        bound = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(grid ) * len(grid[0] )) - total
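# Note (added): the binary-search variant makes one `find_negative_index` call per
# row, so it costs O(rows * log cols) versus O(rows * cols) for brute force.
# Illustrative check: for [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
# it should return 8, matching the two brute-force implementations below.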
def count_negatives_brute_force(grid: list[list[int]] ) -> int:
    return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break(grid: list[list[int]] ) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row ):
            if number < 0:
                total += len(row ) - i
                break
    return total
def benchmark() -> None:
    """Benchmark our functions next to each other."""
    from timeit import timeit

    print('Running benchmarks' )
    setup = (
        'from __main__ import count_negatives_binary_search, '
        'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)" , setup=setup , number=500 )
        print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 706
|
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
UpperCAmelCase_ = "path-to-your-trained-model"
UpperCAmelCase_ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
UpperCAmelCase_ = "A photo of sks dog in a bucket"
UpperCAmelCase_ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
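# Note (added): torch.float16 weights require a CUDA (or other accelerator)
# device; an illustrative CPU-only variant would load in full precision instead:
#   pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float32)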
| 490
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    """configuration_lilt""": ["""LILT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LiltConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_lilt"""] = [
        """LILT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """LiltForQuestionAnswering""",
        """LiltForSequenceClassification""",
        """LiltForTokenClassification""",
        """LiltModel""",
        """LiltPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
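# Note (added): with the lazy module installed in sys.modules, nothing under
# modeling_lilt is imported until first attribute access, e.g.
#   from transformers.models.lilt import LiltModel  # triggers the real import here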
| 299
|
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp( self ) -> None:
        """simple docstring"""
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB )
        mask_token = AddedToken("""<mask>""" , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"""mask_token""": mask_token} )
        tokenizer.add_tokens(["""<ctc_blank>"""] )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        input_text = """this is a test"""
        output_text = """this is a test"""
        return input_text, output_text

    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ):
        """simple docstring"""
        input_text , output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
    def test_convert_token_and_id( self ) -> None:
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = """<pad>"""
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ) -> None:
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-4] , """œ""" )
        self.assertEqual(vocab_keys[-2] , """<mask>""" )
        self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" )
        self.assertEqual(len(vocab_keys ) , 81 )
    def test_vocab_size( self ) -> None:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
    def test_add_tokens_tokenizer( self ) -> None:
        """simple docstring"""
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer )

                self.assertNotEqual(vocab_size , 0 )

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
                added_toks = tokenizer.add_tokens(new_toks )
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer )

                self.assertNotEqual(vocab_size_2 , 0 )
                self.assertEqual(vocab_size , vocab_size_2 )
                self.assertEqual(added_toks , len(new_toks ) )
                self.assertEqual(all_size_2 , all_size + len(new_toks ) )

                tokens = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=False )

                self.assertGreaterEqual(len(tokens ) , 4 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )

                new_toks_2 = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2 )
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer )

                self.assertNotEqual(vocab_size_3 , 0 )
                self.assertEqual(vocab_size , vocab_size_3 )
                self.assertEqual(added_toks_2 , len(new_toks_2 ) )
                self.assertEqual(all_size_3 , all_size_2 + len(new_toks_2 ) )

                tokens = tokenizer.encode(
                    """>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=False )

                self.assertGreaterEqual(len(tokens ) , 6 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0] , tokens[1] )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokens[-4] )
                self.assertEqual(tokens[0] , tokenizer.eos_token_id )
                self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
    def test_pickle_subword_regularization_tokenizer( self ) -> None:
        """simple docstring"""
        pass

    def test_subword_regularization_tokenizer( self ) -> None:
        """simple docstring"""
        pass
    def test_full_tokenizer( self ) -> None:
        """simple docstring"""
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("""This is a test""" )
        # fmt: off
        self.assertListEqual(tokens , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] )
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )

        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        # fmt: off
        self.assertListEqual(ids , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
@slow
    def test_tokenizer_integration( self ) -> None:
"""simple docstring"""
# Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
        expected_encoding = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=sequences , )
| 299
| 1
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig ):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = 'utf-8'
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info( self ) -> datasets.DatasetInfo:
        if self.config.block_size is not None:
            logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead' )
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.' )
        if self.config.newlines_in_values is not None:
            raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported' )
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'files': files} ) )
        return splits
    def _cast_table( self , pa_table: pa.Table ) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                type = self.config.features.arrow_schema.field(column_name ).type
                pa_table = pa_table.append_column(column_name , pa.array([None] * len(pa_table ) , type=type ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema )
        return pa_table
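    # Note (added): `table_cast` aligns the parsed Arrow table with the declared
    # schema, so e.g. a column parsed as int64 can be cast to the requested float
    # type, while the loop above fills entirely missing columns with nulls first.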
def __UpperCamelCase ( self : Any , __UpperCamelCase : Any ) -> Dict:
for file_idx, file in enumerate(itertools.chain.from_iterable(_a ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(_a , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A = json.load(_a )
# We keep only the field we are interested in
A = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(_a , (list, tuple) ):
A = set().union(*[row.keys() for row in dataset] )
A = {col: [row.get(_a ) for row in dataset] for col in keys}
else:
A = dataset
A = pa.Table.from_pydict(_a )
yield file_idx, self._cast_table(_a )
# If the file has one json object per line
else:
with open(_a , 'rb' ) as f:
A = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
A = max(self.config.chunksize // 32 , 16 << 10 )
A = (
self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
)
while True:
A = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(_a )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
A = batch.decode(self.config.encoding , errors=_a ).encode('utf-8' )
try:
while True:
try:
A = paj.read_json(
io.BytesIO(_a ) , read_options=paj.ReadOptions(block_size=_a ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(_a , pa.ArrowInvalid )
and "straddling" not in str(_a )
or block_size > len(_a )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f'''Batch of {len(_a )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
_a , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A = json.load(_a )
except json.JSONDecodeError:
logger.error(f'''Failed to read file \'{file}\' with error {type(_a )}: {e}''' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(_a , _a ): # list is the only sequence type supported in JSON
try:
A = set().union(*[row.keys() for row in dataset] )
A = {col: [row.get(_a ) for row in dataset] for col in keys}
A = pa.Table.from_pydict(_a )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f'''Failed to read file \'{file}\' with error {type(_a )}: {e}''' )
raise ValueError(f'''Not able to read records in the JSON file at {file}.''' ) from None
yield file_idx, self._cast_table(_a )
break
else:
logger.error(f'''Failed to read file \'{file}\' with error {type(_a )}: {e}''' )
raise ValueError(
f'''Not able to read records in the JSON file at {file}. '''
f'''You should probably indicate the field of the JSON file containing your records. '''
f'''This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '''
f'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ''' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_a )
batch_idx += 1
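

# --- Illustrative usage sketch (not part of the original module) ---
# The builder above is what `load_dataset("json", ...)` dispatches to; a minimal,
# hypothetical invocation (the file names are made up) would look like:
#
#   from datasets import load_dataset
#
#   ds = load_dataset("json", data_files="data.jsonl", split="train")
#   ds_nested = load_dataset("json", data_files="data.json", field="data", split="train")
#
# The second call exercises the `field=` branch of `_generate_tables` above.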
| 710
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32_128,
        d_model=768,
        d_kv=64,
        d_ff=2_048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs,
        )
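

# --- Illustrative usage sketch (not part of the original module) ---
# A quick check of the sparse-step bookkeeping computed in __init__ above:
# with 12 layers and 3 sparse encoder layers, every 4th layer is a sparse (MoE) layer.
#
#   config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
#   assert config.encoder_sparse_step == 4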
| 224
| 0
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1_024, max_target_length=1_024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
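
# --- Illustrative invocation (not part of the original script) ---
# `fire.Fire(save_len_file)` maps the function's arguments to CLI flags, so a
# hypothetical invocation (the paths are made up) would be:
#
#   python save_len_file.py --tokenizer_name t5-small --data_dir ./wmt_en_ro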
| 54
|
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_a : Union[str, Any] = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
_a : Union[str, Any] = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
_a : List[str] = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
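

# --- Illustrative sketch (not part of the original module) ---
# The metric delegates to `compute_bleu`, which returns a plain tuple; calling it
# directly on the docstring's first example gives a perfect score for an exact match:
#
#   score = compute_bleu(
#       reference_corpus=[[["hello", "there", "general", "kenobi"]]],
#       translation_corpus=[["hello", "there", "general", "kenobi"]],
#       max_order=4,
#       smooth=False,
#   )
#   bleu = score[0]  # 1.0: all n-gram precisions are 1 and the brevity penalty is 1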
| 145
| 0
|
def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` pence from standard British coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73_682
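
# --- Worked example (not part of the original solution) ---
# For pence = 5 and coins [1, 2, 5] the table evolves as follows:
#   after coin 1: [1, 1, 1, 1, 1, 1]   (only repeated 1s)
#   after coin 2: [1, 1, 2, 2, 3, 3]   (e.g. 4p = 1+1+1+1, 1+1+2, 2+2)
#   after coin 5: [1, 1, 2, 2, 3, 4]   (adds the single 5p coin)
# so there are 4 ways to make 5 pence from {1, 2, 5}: 1+1+1+1+1, 1+1+1+2, 1+2+2, 5.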
| 711
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 484
| 0
|
def kth_permutation(k: int, n: int) -> list:
    """Find the k'th lexicographic permutation of 0, 1, ..., n - 1 in O(n^2) time."""
    # Factorials from 1! to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
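
# --- Worked example (not part of the original module) ---
# kth_permutation(3, 3) walks the factorial number system: factorials = [1, 2]
# and elements = [0, 1, 2].  divmod(3, 2) -> (1, 1) picks element 1;
# divmod(1, 1) -> (1, 0) picks element 2 from the remaining [0, 2]; the leftover
# 0 is appended, giving [1, 2, 0], the permutation at 0-based index 3 of 0, 1, 2.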
| 254
|
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
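

# --- Illustrative sketch (not part of the original test file) ---
# A minimal VQModel round trip with the same tiny config used above; only the
# output shape is checked. The function is defined but not invoked.
def _demo_vq_roundtrip():
    model = VQModel(
        block_out_channels=[32, 64],
        in_channels=3,
        out_channels=3,
        down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
        up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
        latent_channels=3,
    )
    sample = torch.randn(1, 3, 32, 32)
    with torch.no_grad():
        out = model(sample).sample  # encode -> quantize -> decode
    assert out.shape == sample.shape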
| 254
| 1
|
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet2DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1_024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
| 706
|
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: "ModelArguments", training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])

        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )

            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )

            # these two operations make sure that all values
            # before the output lengths indices are attended to:
            # a single 1 is written at index `output_length - 1` of each row, then
            # flip -> cumsum -> flip turns e.g. [0, 0, 1, 0, 0] into [1, 1, 1, 0, 0]
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch
class Wav2Vec2PreTrainer(Trainer):
    """Subclassed Trainer that can decay the gumbel softmax temperature during training."""

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}", cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model, data_collator=data_collator, args=training_args, train_dataset=vectorized_datasets["train"], eval_dataset=vectorized_datasets["validation"], tokenizer=feature_extractor, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
main()
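
# --- Illustrative invocation (not part of the original script) ---
# The flags mirror the dataclass fields parsed by HfArgumentParser above; the
# model identifier and output path are made up:
#
#   python run_pretrain.py \
#       --model_name_or_path <a-wav2vec2-base-checkpoint> \
#       --dataset_name librispeech_asr \
#       --dataset_config_name clean \
#       --output_dir ./wav2vec2-pretrained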
| 74
| 0
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
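

# --- Illustrative usage sketch (not part of the original module) ---
# The processor multiplexes audio and text; a hypothetical call (the array and
# transcription are placeholders) looks like:
#
#   inputs = processor(audio=raw_speech_array, sampling_rate=16_000,
#                      text="a transcription", return_tensors="pt")
#   # inputs holds the feature extractor output plus `labels` from the tokenizer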
| 554
|
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)
        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        input_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(input_positions)
        x = self.dropout_pre(x)
        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
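

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal forward pass with dummy token ids; every hyperparameter below is
# arbitrary and chosen only to keep the example tiny:
#
#   encoder = SpectrogramNotesEncoder(
#       max_length=16, vocab_size=100, d_model=32, dropout_rate=0.1,
#       num_layers=1, num_heads=2, d_kv=16, d_ff=64, feed_forward_proj="gated-gelu",
#   )
#   tokens = torch.randint(0, 100, (1, 16))
#   mask = torch.ones(1, 16, dtype=torch.long)
#   hidden, mask = encoder(tokens, mask)   # hidden: (1, 16, 32)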
| 554
| 1
|
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]
OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
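
# --- Note (not part of the original script) ---
# The conversion is just a key rename in the checkpoint's state dict: DialoGPT
# stores the output projection under `lm_head.decoder.weight`, while
# transformers' GPT-2 expects `lm_head.weight`; the tensor itself is unchanged.
# A hypothetical invocation (the path is made up):
#
#   python convert_dialogpt.py --dialogpt_path ./dialogpt_checkpoints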
| 212
|
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10_000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
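
# --- Note (not part of the original script) ---
# uint16 covers token ids 0..65_535, so any vocab smaller than 1 << 16 (e.g.
# BERT's ~30k wordpieces) is stored in half the memory of int32. A hypothetical
# run (paths are made up):
#
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text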
| 212
| 1
|
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__) # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # We use the input text to generate the segmentation mask with CLIPSeg
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
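

# --- Illustrative usage sketch (not part of the original module) ---
# The pipeline first segments `image` with CLIPSeg using `text`, then inpaints
# the segmented region with `prompt`; the model id and custom pipeline name
# below are illustrative assumptions:
#
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting",
#       custom_pipeline="text_inpainting",
#       segmentation_model=segmentation_model,        # CLIPSegForImageSegmentation
#       segmentation_processor=segmentation_processor,  # CLIPSegProcessor
#   )
#   result = pipe(image=init_image, text="a glass", prompt="a cup of coffee").images[0]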
| 78
|
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
snake_case_ : Any = """."""
if __name__ == "__main__":
snake_case_ : List[str] = os.path.join(REPO_PATH, """utils/documentation_tests.txt""")
snake_case_ : Any = []
snake_case_ : Tuple = []
with open(doctest_file_path) as fp:
for line in fp:
snake_case_ : List[Any] = line.strip()
snake_case_ : List[Any] = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
snake_case_ : Union[str, Any] = """\n""".join(non_existent_paths)
raise ValueError(f'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
if all_paths != sorted(all_paths):
raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
| 595
| 0
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        code = black.format_str(code, mode=black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119))
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check that the number of models matches README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        link_unchanged_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 699
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
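# Usage sketch (my own, not part of the test suite): the dummy-input dict built
# by get_dummy_inputs above feeds the pipeline directly once the components
# from the IF tester mixin are assembled:
#   pipe = IFInpaintingPipeline(**self._get_dummy_components())
#   images = pipe(**self.get_dummy_inputs("cpu")).images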
| 699
| 1
|
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    """simple docstring"""

    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]

        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
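# Standalone usage sketch mirroring the tests above (my own; needs network
# access to download the model and the ESC-50 dataset):
#   from transformers import pipeline
#   from datasets import load_dataset
#   classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   audio = load_dataset("ashraq/esc50")["train"]["audio"][-1]["array"]
#   print(classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]))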
| 683
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores
def _lowercase ( self ) -> Optional[int]:
_UpperCamelCase : int = None
_UpperCamelCase : int = 20
_UpperCamelCase : Any = self._get_uniform_logits(batch_size=2 , length=_snake_case )
# tweak scores to not be uniform anymore
_UpperCamelCase : Any = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_UpperCamelCase : Dict = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_UpperCamelCase : Any = jax.nn.softmax(_snake_case , axis=-1 )
_UpperCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : List[str] = FlaxTemperatureLogitsWarper(temperature=1.3 )
_UpperCamelCase : List[str] = jax.nn.softmax(temp_dist_warper_sharper(_snake_case , scores.copy() , cur_len=_snake_case ) , axis=-1 )
_UpperCamelCase : str = jax.nn.softmax(temp_dist_warper_smoother(_snake_case , scores.copy() , cur_len=_snake_case ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _lowercase ( self ) -> Any:
_UpperCamelCase : List[Any] = None
_UpperCamelCase : Optional[int] = 10
_UpperCamelCase : Any = 2
# create ramp distribution
_UpperCamelCase : Optional[int] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, vocab_size) ).copy()
_UpperCamelCase : Union[str, Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
_UpperCamelCase : Dict = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : Tuple = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_UpperCamelCase : Optional[int] = 5
_UpperCamelCase : str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
_UpperCamelCase : Union[str, Any] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, length) ).copy()
_UpperCamelCase : Optional[Any] = top_k_warp_safety_check(_snake_case , _snake_case , cur_len=_snake_case )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _lowercase ( self ) -> Optional[int]:
_UpperCamelCase : Any = None
_UpperCamelCase : Any = 10
_UpperCamelCase : List[Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_UpperCamelCase : Tuple = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
_UpperCamelCase : List[str] = FlaxTopPLogitsWarper(0.8 )
_UpperCamelCase : Dict = np.exp(top_p_warp(_snake_case , _snake_case , cur_len=_snake_case ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_UpperCamelCase : Optional[int] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# check edge cases with negative and extreme logits
_UpperCamelCase : Optional[int] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_UpperCamelCase : Tuple = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
_UpperCamelCase : Tuple = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
_UpperCamelCase : Dict = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _lowercase ( self ) -> Dict:
_UpperCamelCase : List[Any] = 20
_UpperCamelCase : Optional[int] = 4
_UpperCamelCase : int = 0
_UpperCamelCase : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
# check that min length is applied at length 5
_UpperCamelCase : Any = ids_tensor((batch_size, 20) , vocab_size=20 )
_UpperCamelCase : int = 5
_UpperCamelCase : List[Any] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[int] = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
_UpperCamelCase : Optional[int] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[Any] = 15
_UpperCamelCase : Optional[int] = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Optional[int] = 20
_UpperCamelCase : Union[str, Any] = 4
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
# check that all scores are -inf except the bos_token_id score
_UpperCamelCase : Union[str, Any] = ids_tensor((batch_size, 1) , vocab_size=20 )
_UpperCamelCase : Optional[int] = 1
_UpperCamelCase : str = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : str = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
_UpperCamelCase : List[str] = 3
_UpperCamelCase : Tuple = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : List[str] = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> str:
_UpperCamelCase : Dict = 20
_UpperCamelCase : Tuple = 4
_UpperCamelCase : Any = 0
_UpperCamelCase : str = 5
_UpperCamelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
# check that all scores are -inf except the eos_token_id when max_length is reached
_UpperCamelCase : Optional[Any] = ids_tensor((batch_size, 4) , vocab_size=20 )
_UpperCamelCase : Dict = 4
_UpperCamelCase : Dict = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : int = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_UpperCamelCase : Optional[int] = 3
_UpperCamelCase : Any = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[Any] = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10
        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)
        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
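    # A minimal standalone sketch (my own, not a test) of chaining warpers via
    # FlaxLogitsProcessorList outside the jitted tests above; the calling
    # convention is (input_ids, scores, cur_len):
    def _demo_processor_list(self):
        processors = FlaxLogitsProcessorList(
            [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(top_k=2)]
        )
        input_ids = ids_tensor((1, 4), vocab_size=10)
        scores = self._get_uniform_logits(batch_size=1, length=10)
        return processors(input_ids, scores, cur_len=4)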
| 683
| 1
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    """simple docstring"""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
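# Programmatic alternative to argv parsing (a sketch, my own; `parse_dict` is
# part of HfArgumentParser's public API and these field names exist on
# TensorFlowBenchmarkArguments):
#   parser = HfArgumentParser(TensorFlowBenchmarkArguments)
#   (benchmark_args,) = parser.parse_dict(
#       {"models": ["bert-base-uncased"], "batch_sizes": [1], "sequence_lengths": [8]}
#   )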
| 353
|
from math import pi


def arc_length(angle: float, radius: float) -> float:
    """simple docstring"""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
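# Quick sanity check (my own): a 90-degree arc of radius 10 is a quarter of the
# full circle, so its length should equal (2 * pi * 10) / 4.
if __name__ == "__main__":
    assert abs(arc_length(90, 10) - (2 * pi * 10) / 4) < 1e-9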
| 353
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
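# Consumer-side sketch (my own): with _LazyModule in place, the import below
# resolves SpeechEncoderDecoderConfig immediately, while the torch/flax model
# classes are only imported when the corresponding symbol is first accessed:
#   from transformers.models.speech_encoder_decoder import SpeechEncoderDecoderConfig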
| 86
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
a__ = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
a__ = []
for i in range(len(self.block_out_channels ) - 1 ):
a__ = self.block_out_channels[i]
a__ = self.block_out_channels[i + 1]
a__ = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__SCREAMING_SNAKE_CASE )
a__ = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__SCREAMING_SNAKE_CASE )
a__ = blocks
a__ = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , __SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
a__ = self.conv_in(__SCREAMING_SNAKE_CASE )
a__ = nn.silu(__SCREAMING_SNAKE_CASE )
for block in self.blocks:
a__ = block(__SCREAMING_SNAKE_CASE )
a__ = nn.silu(__SCREAMING_SNAKE_CASE )
a__ = self.conv_out(__SCREAMING_SNAKE_CASE )
return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
def lowercase__ ( self ) -> str:
"""simple docstring"""
a__ = self.block_out_channels
a__ = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
a__ = self.num_attention_heads or self.attention_head_dim
# input
a__ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
a__ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
a__ = FlaxTimestepEmbedding(__SCREAMING_SNAKE_CASE , dtype=self.dtype )
a__ = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
a__ = self.only_cross_attention
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
a__ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
a__ = (num_attention_heads,) * len(self.down_block_types )
# down
a__ = []
a__ = []
a__ = block_out_channels[0]
a__ = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__SCREAMING_SNAKE_CASE )
for i, down_block_type in enumerate(self.down_block_types ):
a__ = output_channel
a__ = block_out_channels[i]
a__ = i == len(__SCREAMING_SNAKE_CASE ) - 1
if down_block_type == "CrossAttnDownBlock2D":
a__ = FlaxCrossAttnDownBlockaD(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
a__ = FlaxDownBlockaD(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__SCREAMING_SNAKE_CASE )
for _ in range(self.layers_per_block ):
a__ = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__SCREAMING_SNAKE_CASE )
if not is_final_block:
a__ = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__SCREAMING_SNAKE_CASE )
a__ = down_blocks
a__ = controlnet_down_blocks
# mid
a__ = block_out_channels[-1]
a__ = FlaxUNetMidBlockaDCrossAttn(
in_channels=__SCREAMING_SNAKE_CASE , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
a__ = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        """simple docstring"""
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
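# Standalone illustration (my own) of the BGR->RGB handling in __call__ above:
# jnp.flip(x, axis=1) reverses the channel axis of an NCHW tensor.
def _demo_channel_flip():
    img = jnp.arange(6).reshape(1, 3, 1, 2)  # (batch, channels, height, width)
    return jnp.flip(img, axis=1)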
| 273
| 0
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 705
|
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
UpperCAmelCase = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self : Tuple , *_A : List[str] , **_A : str ):
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_UpperCamelCase = None
if self.model.config.prefix is not None:
_UpperCamelCase = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_UpperCamelCase = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._sanitize_parameters(prefix=_A , **self._forward_params )
_UpperCamelCase = {**self._preprocess_params, **preprocess_params}
_UpperCamelCase = {**self._forward_params, **forward_params}
def UpperCamelCase_ ( self : Dict , _A : Optional[int]=None , _A : Any=None , _A : Optional[int]=None , _A : List[str]=None , _A : List[Any]=None , _A : int=None , _A : Tuple=None , _A : Optional[Any]=None , **_A : Optional[int] , ):
_UpperCamelCase = {}
if prefix is not None:
_UpperCamelCase = prefix
if prefix:
_UpperCamelCase = self.tokenizer(
_A , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
_UpperCamelCase = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
''' [None, \'hole\']''' )
_UpperCamelCase = handle_long_generation
preprocess_params.update(_A )
_UpperCamelCase = generate_kwargs
_UpperCamelCase = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
_UpperCamelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
_UpperCamelCase = ReturnType.TENSORS
if return_type is not None:
_UpperCamelCase = return_type
if clean_up_tokenization_spaces is not None:
_UpperCamelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
_UpperCamelCase = self.tokenizer.encode(_A , add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
_UpperCamelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCamelCase_ ( self : int , *_A : Union[str, Any] , **_A : Union[str, Any] ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_A , **_A )
def __call__( self : List[str] , _A : str , **_A : Any ):
return super().__call__(_A , **_A )
def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] , _A : int="" , _A : Optional[Any]=None , **_A : Optional[Any] ):
_UpperCamelCase = self.tokenizer(
prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
_UpperCamelCase = prompt_text
if handle_long_generation == "hole":
_UpperCamelCase = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
_UpperCamelCase = generate_kwargs['''max_new_tokens''']
else:
_UpperCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_UpperCamelCase = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
_UpperCamelCase = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
_UpperCamelCase = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def UpperCamelCase_ ( self : Dict , _A : Optional[int] , **_A : str ):
_UpperCamelCase = model_inputs['''input_ids''']
_UpperCamelCase = model_inputs.get('''attention_mask''' , _A )
# Allow empty prompts
if input_ids.shape[1] == 0:
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = 1
else:
_UpperCamelCase = input_ids.shape[0]
_UpperCamelCase = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_UpperCamelCase = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
_UpperCamelCase = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
_UpperCamelCase = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_UpperCamelCase = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_UpperCamelCase = self.model.generate(input_ids=_A , attention_mask=_A , **_A )
_UpperCamelCase = generated_sequence.shape[0]
if self.framework == "pt":
_UpperCamelCase = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_UpperCamelCase = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCamelCase_ ( self : List[str] , _A : Dict , _A : Optional[Any]=ReturnType.FULL_TEXT , _A : Dict=True ):
_UpperCamelCase = model_outputs['''generated_sequence'''][0]
_UpperCamelCase = model_outputs['''input_ids''']
_UpperCamelCase = model_outputs['''prompt_text''']
_UpperCamelCase = generated_sequence.numpy().tolist()
_UpperCamelCase = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_UpperCamelCase = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_UpperCamelCase = self.tokenizer.decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_UpperCamelCase = 0
else:
_UpperCamelCase = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) )
if return_type == ReturnType.FULL_TEXT:
_UpperCamelCase = prompt_text + text[prompt_length:]
else:
_UpperCamelCase = text[prompt_length:]
_UpperCamelCase = {'''generated_text''': all_text}
records.append(_A )
return records
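# End-to-end usage sketch (my own; "gpt2" is illustrative, any causal LM works
# and will be downloaded on first use):
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   print(generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"])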
| 71
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    """simple docstring"""
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    """simple docstring"""
@register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: Optional[str] = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: Optional[str] = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sample_size
# time
if time_embedding_type == "fourier":
SCREAMING_SNAKE_CASE__ : str = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=SCREAMING_SNAKE_CASE__ , log=SCREAMING_SNAKE_CASE__ , flip_sin_to_cos=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
SCREAMING_SNAKE_CASE__ : Optional[int] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=SCREAMING_SNAKE_CASE__ , downscale_freq_shift=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = block_out_channels[0]
if use_timestep_embedding:
SCREAMING_SNAKE_CASE__ : int = block_out_channels[0] * 4
SCREAMING_SNAKE_CASE__ : List[str] = TimestepEmbedding(
in_channels=SCREAMING_SNAKE_CASE__ , time_embed_dim=SCREAMING_SNAKE_CASE__ , act_fn=SCREAMING_SNAKE_CASE__ , out_dim=block_out_channels[0] , )
SCREAMING_SNAKE_CASE__ : Any = nn.ModuleList([] )
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : List[Any] = nn.ModuleList([] )
SCREAMING_SNAKE_CASE__ : int = None
# down
SCREAMING_SNAKE_CASE__ : str = in_channels
for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : int = output_channel
SCREAMING_SNAKE_CASE__ : Tuple = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
SCREAMING_SNAKE_CASE__ : Any = i == len(SCREAMING_SNAKE_CASE__ ) - 1
SCREAMING_SNAKE_CASE__ : int = get_down_block(
SCREAMING_SNAKE_CASE__ , num_layers=SCREAMING_SNAKE_CASE__ , in_channels=SCREAMING_SNAKE_CASE__ , out_channels=SCREAMING_SNAKE_CASE__ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(SCREAMING_SNAKE_CASE__ )
# mid
SCREAMING_SNAKE_CASE__ : Tuple = get_mid_block(
SCREAMING_SNAKE_CASE__ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=SCREAMING_SNAKE_CASE__ , add_downsample=SCREAMING_SNAKE_CASE__ , )
# up
SCREAMING_SNAKE_CASE__ : List[str] = list(reversed(SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = reversed_block_out_channels[0]
if out_block_type is None:
SCREAMING_SNAKE_CASE__ : str = out_channels
else:
SCREAMING_SNAKE_CASE__ : List[Any] = block_out_channels[0]
for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = output_channel
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
reversed_block_out_channels[i + 1] if i < len(SCREAMING_SNAKE_CASE__ ) - 1 else final_upsample_channels
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = i == len(SCREAMING_SNAKE_CASE__ ) - 1
SCREAMING_SNAKE_CASE__ : Any = get_up_block(
SCREAMING_SNAKE_CASE__ , num_layers=SCREAMING_SNAKE_CASE__ , in_channels=SCREAMING_SNAKE_CASE__ , out_channels=SCREAMING_SNAKE_CASE__ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = output_channel
# out
SCREAMING_SNAKE_CASE__ : Any = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
SCREAMING_SNAKE_CASE__ : List[Any] = get_out_block(
out_block_type=SCREAMING_SNAKE_CASE__ , num_groups_out=SCREAMING_SNAKE_CASE__ , embed_dim=block_out_channels[0] , out_channels=SCREAMING_SNAKE_CASE__ , act_fn=SCREAMING_SNAKE_CASE__ , fc_dim=block_out_channels[-1] // 4 , )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        """simple docstring"""
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
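# The scalar/0-d handling at the top of forward() is a common diffusers idiom;
# a standalone sketch (my own) of the same pattern:
def _normalize_timestep(timestep, device):
    if not torch.is_tensor(timestep):
        return torch.tensor([timestep], dtype=torch.long, device=device)
    if len(timestep.shape) == 0:
        return timestep[None].to(device)
    return timestep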
| 680
|
"""simple docstring"""
def is_palindrome(head):
    if not head:
        return True
    # split the list into two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # cut the first half off (the check still works if you forget, but this is cleaner)
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare the two parts
    # the second part has the same number of nodes or one fewer
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    cur = head
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
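# Minimal driver (my own): a tiny node stub plus a symmetric list; the checks
# above only rely on nodes exposing `.val` and `.next`.
class _Node:
    def __init__(self, val, next=None):
        self.val = val
        self.next = next


if __name__ == "__main__":
    head = _Node(1, _Node(2, _Node(2, _Node(1))))
    assert is_palindrome_stack(head)
    assert is_palindrome_dict(head)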
| 223
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    '''simple docstring'''
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 150
|
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowercase = """\
Text data.
Second line of data."""
lowercase = """file"""
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def A__ ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict ) -> List[str]:
'''simple docstring'''
snake_case__ : str = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
snake_case__ : List[str] = input_paths[compression_format]
snake_case__ : List[str] = tmp_path / "cache"
snake_case__ : Tuple = DownloadConfig(cache_dir=_UpperCAmelCase , extract_compressed_file=_UpperCAmelCase )
snake_case__ : Any = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase )
with open(_UpperCAmelCase ) as f:
snake_case__ : str = f.read()
with open(_UpperCAmelCase ) as f:
snake_case__ : List[Any] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def A__ ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> Dict:
'''simple docstring'''
snake_case__ : List[str] = "custom_cache"
snake_case__ : Any = "custom_extracted_dir"
snake_case__ : List[str] = tmp_path / "custom_extracted_path"
if default_extracted:
snake_case__ : Tuple = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , _UpperCAmelCase )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(_UpperCAmelCase ) )
snake_case__ : Optional[int] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
snake_case__ : List[Any] = xz_file
snake_case__ : Union[str, Any] = (
DownloadConfig(extract_compressed_file=_UpperCAmelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_UpperCAmelCase )
)
snake_case__ : List[Any] = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase )
assert Path(_UpperCAmelCase ).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file
def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , _UpperCAmelCase )
def A__ ( ) -> Dict:
'''simple docstring'''
with pytest.raises(_UpperCAmelCase ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _UpperCAmelCase )
def A__ ( _UpperCAmelCase : Any ) -> Tuple:
'''simple docstring'''
snake_case__ : int = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_UpperCAmelCase ):
http_get("https://huggingface.co" , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _UpperCAmelCase )
def A__ ( _UpperCAmelCase : Optional[int] ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_UpperCAmelCase ):
ftp_get("ftp://huggingface.co" , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _UpperCAmelCase )
def A__ ( _UpperCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Dict = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_UpperCAmelCase ):
fsspec_get("s3://huggingface.co" , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
fsspec_head("s3://huggingface.co" )
| 150
| 1
|
def A ( input_str : str ) -> str:
    '''simple docstring'''
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
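# A quick usage sketch of the word-reversal helper above (the name `A` comes
# from this listing): the string is split on whitespace and re-joined in
# reverse word order.
example = A("hello world from doctest")
assert example == "doctest from world hello"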
| 313
|
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
UpperCAmelCase__ : Union[str, Any] = logging.getLogger(__name__)
class __lowercase ( RagRetriever ):
    def __init__( self , config , question_encoder_tokenizer , generator_tokenizer , index=None) -> List[Any]:
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.process_group = None
    def init_retrieval( self , distributed_port) -> Union[str, Any]:
        logger.info('initializing retrieval')
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info('dist initialized')
            # needs to be set manually
            os.environ['GLOO_SOCKET_IFNAME'] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ['MASTER_PORT'] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None , backend='gloo')
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info('dist not initialized / main')
            self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)
    def _is_main( self) -> int:
        return dist.get_rank(group=self.process_group) == 0
    def _scattered( self , scatter_list , target_shape , target_type=torch.float32) -> Dict:
        target_tensor = torch.empty(target_shape , dtype=target_type)
        dist.scatter(target_tensor , src=0 , scatter_list=scatter_list , group=self.process_group)
        return target_tensor
    def _infer_socket_ifname( self) -> str:
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith('e')) , None)
        return ifname
    def retrieve( self , question_hidden_states , n_docs) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids , retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
        # distributed training
        world_size = dist.get_world_size(group=self.process_group)
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape , dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states) , dst=0 , gather_list=gather_list , group=self.process_group)
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids , vectors = self._main_retrieve(torch.cat(gather_list).numpy() , n_docs)
            ids , vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids , n_queries)
            scatter_vectors = self._chunk_tensor(vectors , n_queries)
        doc_ids = self._scattered(scatter_ids , [n_queries, n_docs] , target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors , [n_queries, n_docs, question_hidden_states.shape[1]])
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
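# A minimal single-process sketch of the gather/scatter pattern used by
# `retrieve` above. It is a standalone demo (hypothetical address and port),
# not part of the retriever: with world_size=1, rank 0 plays both the main
# worker and the only client.
import os
import torch
import torch.distributed as dist

os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
dist.init_process_group(backend="gloo", rank=0, world_size=1)
query = torch.randn(2, 4)              # stands in for question_hidden_states
gather_list = [torch.empty(2, 4)]      # one slot per rank on the main worker
dist.gather(query, dst=0, gather_list=gather_list)
target = torch.empty(2, 4)             # each rank receives its slice back
dist.scatter(target, src=0, scatter_list=[torch.cat(gather_list)])
assert torch.equal(target, query)
dist.destroy_process_group()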
| 313
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
'tokenization_canine': ['CanineTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
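# A small consumer-side sketch of the lazy module above (assuming the
# transformers package is installed): attribute access, not module import,
# is what triggers the deferred import inside _LazyModule.
from transformers.models.canine import CanineConfig, CanineTokenizer

config = CanineConfig()          # materializes configuration_canine on first use
tokenizer = CanineTokenizer()    # materializes tokenization_canine on first use
print(type(config).__name__, type(tokenizer).__name__)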
| 707
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def _UpperCAmelCase ( args ):
    '''simple docstring'''
    return ConvertCommand(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
UpperCamelCase_ = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class snake_case_ ( BaseTransformersCLICommand ):
'''simple docstring'''
@staticmethod
    def register_subcommand ( parser ) -> Union[str, Any]:
        train_parser = parser.add_parser(
            "convert", help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.", )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type." )
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder." )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output." )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder." )
        train_parser.add_argument(
            "--finetuning_task_name", type=str, default=None, help="Optional fine-tuning task name if the TF model was a finetuned model.", )
        train_parser.set_defaults(func=_UpperCAmelCase )
    def __init__( self, model_type, tf_checkpoint, pytorch_dump_output, config, finetuning_task_name, *args, ) -> None:
        self._logger = logging.get_logger("transformers-cli/converting" )
        self._logger.info(f"""Loading model {model_type}""" )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run( self ) -> None:
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(UpperCamelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(UpperCamelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(UpperCamelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(UpperCamelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(UpperCamelCase_ )
            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(UpperCamelCase_ )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(UpperCamelCase_ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]" )
| 510
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_: Optional[Any] = logging.get_logger(__name__)
lowercase_: List[str] = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class lowercase__ (PretrainedConfig ):
    """simple docstring"""
    model_type = 'pegasus'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=5_0_2_6_5 , max_position_embeddings=1_0_2_4 , encoder_layers=1_2 , encoder_ffn_dim=4_0_9_6 , encoder_attention_heads=1_6 , decoder_layers=1_2 , decoder_ffn_dim=4_0_9_6 , decoder_attention_heads=1_6 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=1_0_2_4 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=0 , scale_embedding=False , pad_token_id=0 , eos_token_id=1 , forced_eos_token_id=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
    @property
    def num_attention_heads( self ):
        return self.encoder_attention_heads
    @property
    def hidden_size( self ):
        return self.d_model
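# A small usage sketch of the config class above (`lowercase__` is its name in
# this listing): the two properties expose encoder_attention_heads and d_model
# under their canonical names.
config = lowercase__(encoder_layers=6 , decoder_layers=6 )
print(config.num_attention_heads )  # 16
print(config.hidden_size )          # 1024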
| 648
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowercase__ :Tuple = TypeVar('T')
class Node ( Generic[T] ):
'''simple docstring'''
    def __init__( self : Optional[int] , __lowercase : T ):
        '''simple docstring'''
        self.data = __lowercase
        self.next: Node[T] | None = None
def __str__( self : int ):
'''simple docstring'''
return f'''{self.data}'''
class snake_case ( Generic[T] ):
'''simple docstring'''
    def __init__( self : List[Any] ):
        '''simple docstring'''
        self.top: Node[T] | None = None
    def __iter__( self : int ):
        '''simple docstring'''
        node = self.top
        while node:
            yield node.data
            node = node.next
    def __str__( self : Any ):
        '''simple docstring'''
        return "->".join([str(item ) for item in self] )
def __len__( self : int ):
'''simple docstring'''
return len(tuple(iter(self ) ) )
    def is_empty( self : Tuple ):
        '''simple docstring'''
        return self.top is None
    def push( self : List[str] , __lowercase : T ):
        '''simple docstring'''
        node = Node(__lowercase )
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop( self : List[str] ):
        '''simple docstring'''
        if self.is_empty():
            raise IndexError('''pop from empty stack''' )
        assert isinstance(self.top , Node )
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data
    def peek( self : str ):
        '''simple docstring'''
        if self.is_empty():
            raise IndexError('''peek from empty stack''' )
        assert self.top is not None
        return self.top.data
    def clear( self : Optional[Any] ):
        '''simple docstring'''
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
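# A short usage sketch for the generic stack above (`snake_case` is the class
# name in this listing; `Node` is its link type).
stack = snake_case[int]()
stack.push(1 )
stack.push(2 )
assert str(stack ) == "2->1"
assert stack.pop() == 2
assert stack.peek() == 1
stack.clear()
assert stack.is_empty()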
| 522
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowercase__ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
class lowercase__ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
class lowercase__ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
class lowercase__ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
class lowercase__ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
class lowercase__ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
class lowercase__ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
class lowercase__ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
class lowercase__ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
class lowercase__ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
class lowercase__ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
class lowercase__ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
class lowercase__ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def UpperCAmelCase ( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax'''] )
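# A standalone sketch of the dummy-object idea above (hypothetical names; it
# re-implements the metaclass rather than importing the real DummyObject).
class _DemoDummyMeta(type):
    def __getattr__(cls , name ):
        raise ImportError(F"""{cls.__name__} requires the flax library.""" )

class _DemoFlaxThing(metaclass=_DemoDummyMeta ):
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        raise ImportError(F"""{type(self ).__name__} requires the flax library.""" )

try:
    _DemoFlaxThing()
except ImportError as err:
    print(err )  # _DemoFlaxThing requires the flax library.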
| 717
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
    _import_structure["modeling_data2vec_text"] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
    _import_structure["modeling_data2vec_vision"] = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 350
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class lowercase_ ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''rwkv'''
    attribute_map = {'''max_position_embeddings''': '''context_length'''}
    def __init__( self : Dict , vocab_size : int=50_277 , context_length : int=1_024 , hidden_size : int=4_096 , num_hidden_layers : int=32 , attention_hidden_size : Optional[int]=None , intermediate_size : Optional[int]=None , layer_norm_epsilon : float=1E-5 , bos_token_id : int=0 , eos_token_id : int=0 , rescale_every : int=6 , tie_word_embeddings : bool=False , use_cache : bool=True , **kwargs : Optional[int] , ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
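# A quick sketch of the derived defaults in the config above: when
# attention_hidden_size or intermediate_size is left as None, it is computed
# from hidden_size.
config = lowercase_(hidden_size=512 , num_hidden_layers=4 )
assert config.attention_hidden_size == 512   # defaults to hidden_size
assert config.intermediate_size == 4 * 512   # defaults to 4 * hidden_size
print(config.context_length )                # 1024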
| 7
|
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester( unittest.TestCase ):
    def __init__( self : Any , parent : List[Any] , do_resize : bool = True , size : Dict[str, int] = None , size_divisor : int = 32 , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 2_55 , do_normalize : bool = True , do_center_crop : bool = True , image_mean : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , image_std : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , do_pad : bool = True , batch_size : int=7 , min_resolution : int=30 , max_resolution : int=4_00 , num_channels : int=3 , ) -> List[Any]:
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {'shortest_edge': 2_88}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict( self : Tuple ) -> Optional[int]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values( self : int , image_inputs : str , batched : bool=False ) -> Optional[int]:
        if not batched:
            size = self.size['shortest_edge']
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            scale = size / min(w , h )
            if h < w:
                newh , neww = size, scale * w
            else:
                newh , neww = scale * h, size
            max_size = int((13_33 / 8_00) * size )
            if max(newh , neww ) > max_size:
                scale = max_size / max(newh , neww )
                newh = newh * scale
                neww = neww * scale
            newh , neww = int(newh + 0.5 ), int(neww + 0.5 )
            expected_height , expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
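# A worked example of the resize arithmetic in get_expected_values above,
# using the tester defaults shortest_edge=288 and size_divisor=32 on a
# hypothetical 640x480 image.
size , size_divisor = 288 , 32
w , h = 640 , 480
scale = size / min(w , h )                 # 0.6 -> shortest edge becomes 288
newh , neww = size , scale * w             # h < w, so height snaps to 288
max_size = int((1333 / 800) * size )       # 479 cap on the longer edge
if max(newh , neww ) > max_size:
    rescale = max_size / max(newh , neww )
    newh , neww = newh * rescale , neww * rescale
newh , neww = int(newh + 0.5 ) , int(neww + 0.5 )
print(newh // size_divisor * size_divisor , neww // size_divisor * size_divisor )  # 288 384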
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None
    def setUp( self : Optional[Any] ) -> None:
        self.image_processor_tester = BridgeTowerImageProcessingTester(self )
    @property
    def image_processor_dict( self : int ) -> Any:
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self : int ) -> None:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing , 'image_std' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
        self.assertTrue(hasattr(image_processing , 'size_divisor' ) )
    def test_batch_feature( self : Union[str, Any] ) -> None:
        pass
    def test_call_pil( self : int ) -> None:
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self : List[Any] ) -> None:
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self : List[str] ) -> None:
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
| 209
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
    _import_structure["feature_extraction_clap"] = ['''ClapFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 219
|
from __future__ import annotations
def pigeon_sort ( array : list[int] ) -> list[int]:
    """simple docstring"""
    if len(array ) == 0:
        return array
    _min , _max = min(array ), max(array )
    # Compute the variables
    holes_range = _max - _min + 1
    holes , holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range ):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
A__ = input('''Enter numbers separated by comma:\n''')
A__ = [int(x) for x in user_input.split(''',''')]
print(pigeon_sort(unsorted))
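# A small worked trace of the pigeonhole idea above: for [8, 3, 2, 7, 4] the
# holes span _min=2 .. _max=8 (holes_range=7); each value i lands in hole
# i - _min and is written back in hole order.
print(pigeon_sort([8, 3, 2, 7, 4]))  # [2, 3, 4, 7, 8]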
| 219
| 1
|
def get_data (source_data : list[list[float]] ):
    '''simple docstring'''
    data_lists : list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data ):
            if len(data_lists ) < i + 1:
                data_lists.append([] )
            data_lists[i].append(float(el ) )
    return data_lists
def calculate_each_score (data_lists : list[list[float]] , weights : list[int] ):
    '''simple docstring'''
    score_lists : list[list[float]] = []
    for dlist, weight in zip(data_lists , weights ):
        mind = min(dlist )
        maxd = max(dlist )
        score : list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)) )
                except ZeroDivisionError:
                    score.append(1 )
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind) )
                except ZeroDivisionError:
                    score.append(0 )
        # weight not 0 or 1
        else:
            msg = f'''Invalid weight of {weight:f} provided'''
            raise ValueError(msg )
        score_lists.append(score )
    return score_lists
def generate_final_scores (score_lists : list[list[float]] ):
    '''simple docstring'''
    final_scores : list[float] = [0 for i in range(len(score_lists[0] ) )]
    for slist in score_lists:
        for j, ele in enumerate(slist ):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity (source_data : list[list[float]] , weights : list[int] ):
    '''simple docstring'''
    data_lists = get_data(source_data )
    score_lists = calculate_each_score(data_lists , weights )
    final_scores = generate_final_scores(score_lists )
    # append scores to source data
    for i, ele in enumerate(final_scores ):
        source_data[i].append(ele )
    return source_data
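# A usage sketch of the scoring pipeline above: two candidates with
# (price, speed) columns, where weight 0 means "lower is better" and weight 1
# means "higher is better".
demo_data = [[20.0, 60.0], [23.0, 90.0]]
demo_weights = [0, 1]
print(procentual_proximity(demo_data , demo_weights ))
# [[20.0, 60.0, 1.0], [23.0, 90.0, 1.0]]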
| 165
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class lowercase ( ProcessorMixin ):
    """simple docstring"""
    feature_extractor_class = 'Wav2Vec2FeatureExtractor'
    tokenizer_class = 'AutoTokenizer'
    def __init__( self : Tuple , feature_extractor : Any , tokenizer : str ):
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
@classmethod
    def from_pretrained( cls : List[str] , pretrained_model_name_or_path : Optional[Any] , **kwargs : int ):
        """simple docstring"""
        try:
            return super().from_pretrained(pretrained_model_name_or_path , **kwargs )
        except OSError:
            warnings.warn(
                F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
                """ include a `tokenizer_class` attribute is deprecated and will be """
                """removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"""
                """ attribute to either your `config.json` or `tokenizer_config.json` """
                """file to suppress this warning: """ , FutureWarning , )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path , **kwargs )
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path , **kwargs )
            return cls(feature_extractor=feature_extractor , tokenizer=tokenizer )
    def __call__( self : List[str] , *args : int , **kwargs : str ):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
            audio = kwargs.pop("""raw_speech""" )
        else:
            audio = kwargs.pop("""audio""" , None )
        sampling_rate = kwargs.pop("""sampling_rate""" , None )
        text = kwargs.pop("""text""" , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs
    def pad( self : int , *args : List[Any] , **kwargs : int ):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features = kwargs.pop("""input_features""" , None )
        labels = kwargs.pop("""labels""" , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["""labels"""] = labels["""input_ids"""]
            return input_features
    def batch_decode( self : str , *args : Tuple , **kwargs : Dict ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self : Union[str, Any] , *args : Dict , **kwargs : str ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
@contextmanager
    def as_target_processor( self : List[Any] ):
        """simple docstring"""
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your audio inputs, or in a separate call.""" )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
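# A hedged usage sketch of the processor class above (`lowercase` is its name
# in this listing). The checkpoint is a real public one, but downloading it
# needs network access; the audio array is fabricated.
import numpy as np

processor = lowercase.from_pretrained("facebook/wav2vec2-base-960h" )
speech = np.zeros(16_000 , dtype=np.float32 )   # one second of silence at 16 kHz
inputs = processor(audio=speech , sampling_rate=16_000 , text="HELLO" , return_tensors="pt" )
print(inputs["input_values"].shape , inputs["labels"].shape )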
| 165
| 1
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class __snake_case ( unittest.TestCase):
"""simple docstring"""
    def setUp( self : int ) -> None:
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48_145_466, 0.4_578_275, 0.40_821_073],
            'image_std': [0.26_862_954, 0.26_130_258, 0.27_577_711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self : Any , **kwargs : Tuple ) -> Any:
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self : Optional[Any] , **kwargs : str ) -> List[str]:
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self : Tuple , **kwargs : Union[str, Any] ) -> Optional[int]:
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self : int ) -> None:
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self : Optional[Any] ) -> Optional[int]:
        image_inputs = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self : List[Any] ) -> None:
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , CLIPTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , ViTImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , ViTImageProcessor )
    def test_save_load_pretrained_additional_features( self : List[Any] ) -> None:
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )
    def test_image_processor( self : Dict ) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="""np""" )
        input_processor = processor(images=image_input , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer( self : Dict ) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self : Dict ) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_visual_prompt( self : int ) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input , visual_prompt=visual_prompt_input )
        self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """conditional_pixel_values"""] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode( self : List[str] ) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
| 709
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : str = logging.get_logger(__name__)
__A : List[str] = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class __snake_case ( PretrainedConfig):
    """simple docstring"""
    model_type = 'timesformer'
    def __init__( self : List[Any] , image_size : int=2_24 , patch_size : int=16 , num_channels : int=3 , num_frames : int=8 , hidden_size : int=7_68 , num_hidden_layers : int=12 , num_attention_heads : int=12 , intermediate_size : int=30_72 , hidden_act : str="gelu" , hidden_dropout_prob : float=0.0 , attention_probs_dropout_prob : float=0.0 , initializer_range : float=0.02 , layer_norm_eps : float=1E-6 , qkv_bias : bool=True , attention_type : str="divided_space_time" , drop_path_rate : int=0 , **kwargs : List[str] , ) -> None:
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
| 398
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case : List[Any] = logging.get_logger(__name__)
snake_case : List[Any] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
snake_case : List[Any] = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
}
snake_case : List[str] = {
'''moussaKam/mbarthez''': 10_24,
'''moussaKam/barthez''': 10_24,
'''moussaKam/barthez-orangesum-title''': 10_24,
}
snake_case : Optional[Any] = '''▁'''
class _snake_case ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self , _lowerCamelCase , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase = None , **_lowerCamelCase , ):
# Mask token behave like a normal word, i.e. include the space before it
a :List[str] = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token
a :Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
a :Optional[int] = vocab_file
a :str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCamelCase ) )
a :str = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
a :Dict = len(self.sp_model ) - 1
a :int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return len(self.sp_model )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Tuple = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a :Tuple = self.sp_model.PieceToId(_lowerCamelCase )
return spm_id if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
a :Optional[Any] = []
a :Optional[Any] = ''''''
a :Union[str, Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_lowerCamelCase ) + token
a :int = True
a :Optional[int] = []
else:
current_sub_tokens.append(_lowerCamelCase )
a :str = False
out_string += self.sp_model.decode(_lowerCamelCase )
return out_string.strip()
def __getstate__( self ):
a :Tuple = self.__dict__.copy()
a :Union[str, Any] = None
return state
def __setstate__( self , _lowerCamelCase ):
a :Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
a :int = {}
a :List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ):
if not os.path.isdir(_lowerCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
a :Tuple = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , '''wb''' ) as fi:
a :Optional[int] = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
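# For reference, the pair layout produced by build_inputs_with_special_tokens
# above is `<s> A </s></s> B </s>`. A tiny sketch with hypothetical token ids;
# only cls=0 and sep=2 are taken from the fairseq map above:
cls_id, sep_id = 0, 2
tokens_a, tokens_b = [10, 11], [12, 13]
single = [cls_id] + tokens_a + [sep_id]                               # <s> A </s>
pair = [cls_id] + tokens_a + [sep_id, sep_id] + tokens_b + [sep_id]   # <s> A </s></s> B </s>
assert single == [0, 10, 11, 2]
assert pair == [0, 10, 11, 2, 2, 12, 13, 2]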
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.base_model = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output2 = outputs["hidden_states"][-2]
            sequence_output2 = self.pre_LN(sequence_output2)
            projection_state2 = self.transformation_pre(sequence_output2)

            return TransformationModelOutput(
                projection_state=projection_state2,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
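# Standalone sketch of the projection step performed in forward() above: a
# single Linear maps hidden states of width hidden_size to project_dim. The
# dimensions below are illustrative, not the checkpoint's real sizes.
import torch
from torch import nn

hidden_size, project_dim, seq_len = 16, 8, 4
transformation = nn.Linear(hidden_size, project_dim)
last_hidden_state = torch.randn(1, seq_len, hidden_size)
projection_state = transformation(last_hidden_state)
print(projection_state.shape)  # torch.Size([1, 4, 8])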
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    """
    >>> test_circular_linked_list()
    """
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
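# Usage sketch (hypothetical, building on the class above): because the tail
# points back to the head, plain `node = node.next` walks wrap around, which
# suits round-robin scheduling.
def demo_round_robin() -> None:
    tasks = CircularLinkedList()
    for name in ["a", "b", "c"]:
        tasks.insert_tail(name)

    node = tasks.head
    for _ in range(7):  # walk past the tail twice to show the wrap-around
        print(node.data, end=" ")  # a b c a b c a
        node = node.next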
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlm_integration_test(self):
# with apply_OCR = True
snake_case__ : Any = LayoutLMvaImageProcessor()
from datasets import load_dataset
snake_case__ : Optional[int] = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
snake_case__ : List[str] = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
snake_case__ : str = image_processing(snake_case_ , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case__ : Union[str, Any] = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
snake_case__ : Any = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , snake_case_ )
self.assertListEqual(encoding.boxes , snake_case_ )
# with apply_OCR = False
snake_case__ : Union[str, Any] = LayoutLMvaImageProcessor(apply_ocr=snake_case_ )
snake_case__ : List[Any] = image_processing(snake_case_ , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
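# Hedged usage sketch of the processor exercised above ("document.png" is a
# hypothetical local file; apply_ocr=True additionally needs a tesseract binary):
from PIL import Image

image = Image.open("document.png").convert("RGB")

with_ocr = LayoutLMvaImageProcessor()               # returns pixel values + OCR words + boxes
encoding = with_ocr(image, return_tensors="pt")
print(encoding.pixel_values.shape)                  # (1, 3, 224, 224)

no_ocr = LayoutLMvaImageProcessor(apply_ocr=False)  # pixel values only
print(no_ocr(image, return_tensors="pt").pixel_values.shape)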
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
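# For context, a toy sketch of the lazy-import idea used above. This is a
# simplified stand-in, not transformers' actual _LazyModule:
import importlib
import types


class ToyLazyModule(types.ModuleType):
    """Resolve exported attributes from their submodules on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._name_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(f"{self.__name__}.{module_name}"), attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value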
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
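# The width/depth coefficients above implement EfficientNet's compound scaling.
# A standalone sketch of the usual channel-rounding rule from the paper; this
# mirrors the published recipe, not necessarily the exact modeling helper:
def round_filters(channels: int, width_coefficient: float, depth_divisor: int = 8) -> int:
    """Scale channels by width_coefficient, snapping to multiples of depth_divisor."""
    scaled = channels * width_coefficient
    new_channels = max(depth_divisor, int(scaled + depth_divisor / 2) // depth_divisor * depth_divisor)
    if new_channels < 0.9 * scaled:  # never round down by more than 10%
        new_channels += depth_divisor
    return int(new_channels)


print(round_filters(32, width_coefficient=2.0))  # 64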
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the set of products over all partitions of the number into primes."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5_000) -> int | None:
    """Return the smallest number with more than the given count of prime partitions."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
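# Sanity check under the restored names: the prime partitions of 7 are
# 7, 5+2 and 3+2+2, so the distinct products are {7, 10, 12}.
assert partition(7) == {7, 10, 12}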
if __name__ == "__main__":
print(f'''{solution() = }''')
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    x: int
    y: str


class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)
    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''', [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize('''input_data''', [{}] )
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''', [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
], )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
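# Toy sketch of the behavior test_iflatmap_unordered relies on: results stream
# back as soon as any worker yields them, so ordering across inputs is not
# guaranteed. This uses only the stdlib, not datasets' actual implementation.
from concurrent.futures import ThreadPoolExecutor, as_completed


def flatmap_unordered(func, items):
    with ThreadPoolExecutor(max_workers=2) as ex:
        futures = [ex.submit(func, item) for item in items]
        for fut in as_completed(futures):  # completion order, not submission order
            yield from fut.result()


print(sorted(flatmap_unordered(str.split, ["hello there"] * 3)))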
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_resize_position_embeddings = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
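# Why `expected_num_stages + 1` in test_hidden_states_output above: the
# hidden_states tuple holds the embedding (stem) output plus one entry per
# stage. Toy check of the arithmetic:
num_stages = 4
hidden_states = ["stem"] + [f"stage{i}" for i in range(1, num_stages + 1)]
assert len(hidden_states) == num_stages + 1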
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the product a*b*c of the Pythagorean triplet with a + b + c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
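# Worked example: for n = 12, a = 3 gives b = (144 - 72) // (24 - 6) = 4 and
# c = 5; since 3**2 + 4**2 == 5**2, solution(12) == 3 * 4 * 5 == 60.
assert solution(12) == 60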
if __name__ == "__main__":
print(F'''{solution() = }''')
'''simple docstring'''
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb ):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path , pytorch_dump_folder_path , dict_path , config_yaml_path , encoder_config_path , decoder_config_path , add_adapter , adapter_kernel_size , adapter_stride , decoder_start_token_id , encoder_output_dim , ):
    '''simple docstring'''
    # load configs
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path , add_adapter=add_adapter , adapter_stride=adapter_stride , adapter_kernel_size=adapter_kernel_size , use_auth_token=True , output_hidden_size=encoder_output_dim , )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path )
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={
            '''config_yaml''': config_yaml_path,
            '''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
            '''w2v_path''': checkpoint_path,
            '''load_pretrained_decoder_from''': None,
        } , )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path , use_auth_token=True )
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    recursively_load_weights_wavaveca(model.encoder , hf_encoder )
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config )
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' )
    logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config['''pad_token_id'''] = tokenizer.pad_token_id
    config['''bos_token_id'''] = tokenizer.bos_token_id
    config['''eos_token_id'''] = tokenizer.eos_token_id
    config['''tokenizer_class'''] = '''mbart50'''
    config['''feature_extractor_type'''] = '''wav2vec2'''
    config['''decoder_start_token_id'''] = tokenizer.eos_token_id
    config['''forced_bos_token_id'''] = 250004
    config['''forced_eos_token_id'''] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
    parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whether to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=10_24, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=25_00_04, type=int, help='''`decoder_start_token_id` of model config''')
_UpperCAmelCase : Optional[int] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
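A minimal sketch of loading the artifacts this conversion script writes, assuming the standard `transformers` API; the dump folder path is a hypothetical placeholder, not something produced in this document.

from transformers import SpeechEncoderDecoderModel, Wav2Vec2FeatureExtractor

model = SpeechEncoderDecoderModel.from_pretrained("./wav2vec2-mbart50-dump")  # hypothetical path
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("./wav2vec2-mbart50-dump")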
| 708
|
'''simple docstring'''
class Node:
    def __init__( self , data , previous=None , next_node=None ):
        self.data = data
        self.previous = previous
        self.next = next_node
    def __str__( self ):
        return f'{self.data}'
    def get_data( self ):
        return self.data
    def get_next( self ):
        return self.next
    def get_previous( self ):
        return self.previous
class LinkedListIterator:
    def __init__( self , head ):
        self.current = head
    def __iter__( self ):
        return self
    def __next__( self ):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    def __init__( self ):
        self.head = None  # First node in list
        self.tail = None  # Last node in list
    def __str__( self ):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data() )
            current = current.get_next()
        return " ".join(str(node ) for node in nodes )
    def __contains__( self , value ):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False
def __iter__( self ):
return LinkedListIterator(self.head )
    def get_head_data( self ):
        if self.head:
            return self.head.get_data()
        return None
    def get_tail_data( self ):
        if self.tail:
            return self.tail.get_data()
        return None
    def set_head( self , node ):
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head , node )
    def set_tail( self , node ):
        if self.head is None:
            self.set_head(node )
        else:
            self.insert_after_node(self.tail , node )
    def insert( self , value ):
        node = Node(value )
        if self.head is None:
            self.set_head(node )
        else:
            self.set_tail(node )
    def insert_before_node( self , node , node_to_insert ):
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert
    def insert_after_node( self , node , node_to_insert ):
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert
    def insert_at_position( self , position , value ):
        current_position = 1
        new_node = Node(value )
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node , new_node )
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail , new_node )
    def get_node( self , item ):
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception('''Node not found''' )
    def delete_value( self , value ):
        if (node := self.get_node(value )) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node )
    @staticmethod
    def remove_node_pointers( node ):
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.previous = None
        node.next = None
    def is_empty( self ):
        return self.head is None
def UpperCamelCase ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
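A small usage sketch exercising the list above; it relies on the method names restored in the definitions and is purely illustrative.

# Usage sketch: build, insert mid-list, delete, query.
linked_list = LinkedList()
for item in (1, 2, 3):
    linked_list.insert(item )          # list: 1 2 3
linked_list.insert_at_position(2 , 99 )  # list: 1 99 2 3
linked_list.delete_value(99 )            # list: 1 2 3
assert 2 in linked_list
assert str(linked_list ) == "1 2 3"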
| 145
| 0
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
UpperCAmelCase_ = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
UpperCAmelCase_ = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase_ = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase_ = model(UpperCAmelCase__ )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , UpperCAmelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCAmelCase__ , atol=1e-3 ) )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
UpperCAmelCase_ = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' )
UpperCAmelCase_ = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase_ = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase_ = model(UpperCAmelCase__ )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , UpperCAmelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCAmelCase__ , atol=1e-3 ) )
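The hard-coded `input_ids` above encode the sentence noted in the inline comment. A sketch of producing the same tensor with a tokenizer, assuming the standard `transformers` API (not part of the original test):

from transformers import XLMRobertaTokenizer

tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
input_ids = tokenizer("The dog is cute and lives in the garden house", return_tensors="pt").input_ids
# should match the tensor hard-coded in the tests above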
| 390
|
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Optional[Any] = logging.get_logger(__name__)
lowercase__ : Any = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''data2vec-audio'''
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any]=32 , UpperCAmelCase__ : List[str]=768 , UpperCAmelCase__ : int=12 , UpperCAmelCase__ : int=12 , UpperCAmelCase__ : str=3072 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=0.02 , UpperCAmelCase__ : int=1e-5 , UpperCAmelCase__ : List[Any]="gelu" , UpperCAmelCase__ : Optional[int]=(512, 512, 512, 512, 512, 512, 512) , UpperCAmelCase__ : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase__ : Union[str, Any]=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : str=16 , UpperCAmelCase__ : Any=19 , UpperCAmelCase__ : Optional[int]=5 , UpperCAmelCase__ : str=0.05 , UpperCAmelCase__ : Dict=10 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : Union[str, Any]=10 , UpperCAmelCase__ : Optional[int]=0 , UpperCAmelCase__ : Optional[Any]="sum" , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : List[Any]=256 , UpperCAmelCase__ : str=(512, 512, 512, 512, 1500) , UpperCAmelCase__ : int=(5, 3, 3, 1, 1) , UpperCAmelCase__ : Union[str, Any]=(1, 2, 3, 1, 1) , UpperCAmelCase__ : int=512 , UpperCAmelCase__ : str=0 , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : List[str] , ) ->Optional[Any]:
super().__init__(**UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ )
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = feat_extract_activation
UpperCAmelCase_ = list(UpperCAmelCase__ )
UpperCAmelCase_ = list(UpperCAmelCase__ )
UpperCAmelCase_ = list(UpperCAmelCase__ )
UpperCAmelCase_ = conv_bias
UpperCAmelCase_ = num_conv_pos_embeddings
UpperCAmelCase_ = num_conv_pos_embedding_groups
UpperCAmelCase_ = conv_pos_kernel_size
UpperCAmelCase_ = len(self.conv_dim )
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_dropout
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = activation_dropout
UpperCAmelCase_ = feat_proj_dropout
UpperCAmelCase_ = final_dropout
UpperCAmelCase_ = layerdrop
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ = mask_time_prob
UpperCAmelCase_ = mask_time_length
UpperCAmelCase_ = mask_time_min_masks
UpperCAmelCase_ = mask_feature_prob
UpperCAmelCase_ = mask_feature_length
UpperCAmelCase_ = mask_feature_min_masks
# ctc loss
UpperCAmelCase_ = ctc_loss_reduction
UpperCAmelCase_ = ctc_zero_infinity
# adapter
UpperCAmelCase_ = add_adapter
UpperCAmelCase_ = adapter_kernel_size
UpperCAmelCase_ = adapter_stride
UpperCAmelCase_ = num_adapter_layers
UpperCAmelCase_ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ = list(UpperCAmelCase__ )
UpperCAmelCase_ = list(UpperCAmelCase__ )
UpperCAmelCase_ = list(UpperCAmelCase__ )
UpperCAmelCase_ = xvector_output_dim
    @property
    def inputs_to_logits_ratio( self ):
        return math.prod(self.conv_stride )
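With the default `conv_stride=(5, 2, 2, 2, 2, 2, 2)`, the property above multiplies the strides, i.e. how many raw audio samples collapse into one encoder frame. A quick self-contained check:

import math

strides = (5, 2, 2, 2, 2, 2, 2)
assert math.prod(strides) == 320  # 320 samples per output frame (20 ms at 16 kHz)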
| 390
| 1
|
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , )
    def _compute(self , predictions , references , return_pvalue=False ):
        results = spearmanr(predictions , references )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 711
|
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCAmelCase_ : List[Any] = {
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig( PretrainedConfig ):
    model_type = """maskformer"""
    attribute_map = {"""hidden_size""": """mask_feature_size"""}
    backbones_supported = ["""resnet""", """swin"""]
    decoders_supported = ["""detr"""]
    def __init__(self , fpn_feature_size = 2_5_6 , mask_feature_size = 2_5_6 , no_object_weight = 0.1 , use_auxiliary_loss = False , backbone_config = None , decoder_config = None , init_std = 0.0_2 , init_xavier_std = 1.0 , dice_weight = 1.0 , cross_entropy_weight = 1.0 , mask_weight = 2_0.0 , output_auxiliary_logits = None , **kwargs , ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=3_8_4 , in_channels=3 , patch_size=4 , embed_dim=1_2_8 , depths=[2, 2, 1_8, 2] , num_heads=[4, 8, 1_6, 3_2] , window_size=1_2 , drop_path_rate=0.3 , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop("""model_type""" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                F"Supported model types: {','.join(self.backbones_supported )}" )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("""model_type""" ) if isinstance(decoder_config , dict ) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    F"Transformer Decoder {decoder_type} not supported, please use one of"
                    F" {','.join(self.decoders_supported )}" )
            if isinstance(decoder_config , dict ):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config )
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs )
    @classmethod
    def from_backbone_and_decoder_configs(cls , backbone_config , decoder_config , **kwargs ):
        return cls(
            backbone_config=backbone_config , decoder_config=decoder_config , **kwargs , )
    def to_dict(self ):
        output = copy.deepcopy(self.__dict__ )
        output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""decoder_config"""] = self.decoder_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
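A sketch of composing a config from explicit backbone and decoder configs via the classmethod defined above; the argument values are illustrative only.

backbone = SwinConfig(image_size=384 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] )
decoder = DetrConfig()
config = MaskFormerConfig.from_backbone_and_decoder_configs(backbone , decoder , mask_feature_size=256 )
config_dict = config.to_dict()  # backbone/decoder are serialized as nested dicts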
| 156
| 0
|
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
"Timeout",
"BaseFileLock",
"WindowsFileLock",
"UnixFileLock",
"SoftFileLock",
"FileLock",
]
_a = "3.0.12"
_logger = None
def logger():
    '''simple docstring'''
    global _logger
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class Timeout( TimeoutError ):
    '''simple docstring'''
    def __init__( self , lock_file ):
        '''simple docstring'''
        self.lock_file = lock_file
        return None
    def __str__( self ):
        '''simple docstring'''
        temp = F'The file lock \'{self.lock_file}\' could not be acquired.'
        return temp
class _Acquire_ReturnProxy:
    '''simple docstring'''
    def __init__( self , lock ):
        '''simple docstring'''
        self.lock = lock
        return None
def __enter__( self ):
'''simple docstring'''
return self.lock
def __exit__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
self.lock.release()
return None
class BaseFileLock:
'''simple docstring'''
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        '''simple docstring'''
        max_filename_length = max_filename_length if max_filename_length is not None else 2_5_5
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file( self ):
        '''simple docstring'''
        return self._lock_file
    @property
    def timeout( self ):
        '''simple docstring'''
        return self._timeout
    @timeout.setter
    def timeout( self , value ):
        '''simple docstring'''
        self._timeout = float(value )
        return None
    def _acquire( self ):
        '''simple docstring'''
        raise NotImplementedError()
    def _release( self ):
        '''simple docstring'''
        raise NotImplementedError()
    @property
    def is_locked( self ):
        '''simple docstring'''
        return self._lock_file_fd is not None
    def acquire( self , timeout=None , poll_intervall=0.05 ):
        '''simple docstring'''
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self )
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(F'Attempting to acquire lock {lock_id} on {lock_filename}' )
                        self._acquire()
                if self.is_locked:
                    logger().debug(F'Lock {lock_id} acquired on {lock_filename}' )
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(F'Timeout on acquiring lock {lock_id} on {lock_filename}' )
                    raise Timeout(self._lock_file )
                else:
                    logger().debug(
                        F'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...' )
                    time.sleep(poll_intervall )
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0 , self._lock_counter - 1 )
            raise
        return _Acquire_ReturnProxy(lock=self )
    def release( self , force=False ):
        '''simple docstring'''
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self )
                    lock_filename = self._lock_file
                    logger().debug(F'Attempting to release lock {lock_id} on {lock_filename}' )
                    self._release()
                    self._lock_counter = 0
                    logger().debug(F'Lock {lock_id} released on {lock_filename}' )
        return None
def __enter__( self ):
'''simple docstring'''
self.acquire()
return self
def __exit__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
self.release()
return None
def __del__( self ):
'''simple docstring'''
        self.release(force=True )
return None
    def hash_filename_if_too_long( self , path , max_length ):
        '''simple docstring'''
        filename = os.path.basename(path )
        if len(filename ) > max_length and max_length > 0:
            dirname = os.path.dirname(path )
            hashed_filename = str(hash(filename ) )
            new_filename = filename[: max_length - len(hashed_filename ) - 8] + '''...''' + hashed_filename + '''.lock'''
            return os.path.join(dirname , new_filename )
        else:
            return path
class WindowsFileLock( BaseFileLock ):
    '''simple docstring'''
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        '''simple docstring'''
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
        self._lock_file = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
    def _acquire( self ):
        '''simple docstring'''
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(fd )
            else:
                self._lock_file_fd = fd
        return None
    def _release( self ):
        '''simple docstring'''
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
        os.close(fd )
        try:
            os.remove(self._lock_file )
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock( BaseFileLock ):
    '''simple docstring'''
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        '''simple docstring'''
        max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
    def _acquire( self ):
        '''simple docstring'''
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file , open_mode )
        try:
            fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(fd )
        else:
            self._lock_file_fd = fd
        return None
    def _release( self ):
        '''simple docstring'''
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd , fcntl.LOCK_UN )
        os.close(fd )
        return None
class SoftFileLock( BaseFileLock ):
    '''simple docstring'''
    def _acquire( self ):
        '''simple docstring'''
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release( self ):
        '''simple docstring'''
        os.close(self._lock_file_fd )
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
if warnings is not None:
warnings.warn("only soft file lock is available")
| 481
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' ,[
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10 )]),
        ({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(i ,i + 1 ) for i in range(10 )]),
({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1 )]),
({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 ,4 ), range(4 ,7 ), range(7 ,10 )]),
({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 ,1 ), range(1 ,2 ), range(2 ,3 )]),
] ,)
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Dict:
'''simple docstring'''
lowerCamelCase__ = _distribute_shards(**__snake_case )
assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' ,[
({'''foo''': 0}, 10, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] ,)
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> Dict:
'''simple docstring'''
lowerCamelCase__ = _split_gen_kwargs(__snake_case ,__snake_case )
assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' ,[
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] ,)
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Union[str, Any]:
'''simple docstring'''
if expected is RuntimeError:
with pytest.raises(__snake_case ):
_number_of_shards_in_gen_kwargs(__snake_case )
else:
lowerCamelCase__ = _number_of_shards_in_gen_kwargs(__snake_case )
assert out == expected
| 481
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] ={
"""google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig( PretrainedConfig ):
    model_type = 'canine'
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_6384 , type_vocab_size=16 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , pad_token_id=0 , bos_token_id=0xE000 , eos_token_id=0xE001 , downsampling_rate=4 , upsampling_kernel_size=4 , num_hash_functions=8 , num_hash_buckets=1_6384 , local_transformer_stride=128 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
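CANINE operates directly on Unicode code points, which is why the special token ids above default to private-use code points. A small sketch exercising the defaults of the config defined above:

config = CanineConfig()
assert config.bos_token_id == 0xE000 and config.eos_token_id == 0xE001
assert config.downsampling_rate == 4  # characters per downsampled position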
| 197
|
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__lowerCAmelCase : List[Any] ="""\
Text data.
Second line of data."""
__lowerCAmelCase : Any ="""file"""
@pytest.fixture(scope="""session""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> Union[str, Any]:
'''simple docstring'''
lowercase = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
lowercase = bytes(lowerCAmelCase__ , """utf-8""" )
with zstd.open(lowerCAmelCase__ , """wb""" ) as f:
f.write(lowerCAmelCase__ )
return path
@pytest.fixture
def tmpfs_file(tmpfs ):
    '''simple docstring'''
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , """w""" ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def test_cached_path_extract(compression_format , gz_file , xz_file , zstd_path , tmp_path , text_file ):
    '''simple docstring'''
    input_paths = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / """cache"""
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def test_cached_path_extracted_dir(default_extracted , default_cache_dir , xz_file , tmp_path , monkeypatch ):
    '''simple docstring'''
    custom_cache_dir = """custom_cache"""
    custom_extracted_dir = """custom_extracted_dir"""
    custom_extracted_path = tmp_path / """custom_extracted_path"""
    if default_extracted:
        expected = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
    else:
        monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , custom_extracted_dir )
        monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] ) -> Tuple:
'''simple docstring'''
lowercase = str(Path(lowerCAmelCase__ ).resolve() )
assert cached_path(lowerCAmelCase__ ) == text_file
# relative path
lowercase = str(Path(lowerCAmelCase__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(lowerCAmelCase__ ) == text_file
def UpperCAmelCase__ ( lowerCAmelCase__ :Any ) -> List[Any]:
'''simple docstring'''
lowercase = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(lowerCAmelCase__ ):
cached_path(lowerCAmelCase__ )
# relative path
lowercase = """./__missing_file__.txt"""
with pytest.raises(lowerCAmelCase__ ):
cached_path(lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :Any ) -> Union[str, Any]:
'''simple docstring'''
lowercase = get_from_cache(f'tmp://{tmpfs_file}' )
with open(lowerCAmelCase__ ) as f:
lowercase = f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowerCAmelCase__ )
def UpperCAmelCase__ ( ) -> str:
'''simple docstring'''
with pytest.raises(lowerCAmelCase__ ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple ) -> Any:
'''simple docstring'''
lowercase = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(lowerCAmelCase__ ):
http_get("""https://huggingface.co""" , temp_file=lowerCAmelCase__ )
with pytest.raises(lowerCAmelCase__ ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> Any:
'''simple docstring'''
lowercase = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(lowerCAmelCase__ ):
ftp_get("""ftp://huggingface.co""" , temp_file=lowerCAmelCase__ )
with pytest.raises(lowerCAmelCase__ ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :Any ) -> Dict:
'''simple docstring'''
lowercase = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(lowerCAmelCase__ ):
fsspec_get("""s3://huggingface.co""" , temp_file=lowerCAmelCase__ )
with pytest.raises(lowerCAmelCase__ ):
fsspec_head("""s3://huggingface.co""" )
| 197
| 1
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images=None , text=None , add_special_tokens=True , padding=False , truncation=False , max_length=None , stride=0 , pad_to_multiple_of=None , return_image_mask=None , return_codebook_pixels=None , return_token_type_ids=None , return_attention_mask=None , return_overflowing_tokens=False , return_special_tokens_mask=False , return_offsets_mapping=False , return_length=False , verbose=True , return_tensors=None , **kwargs , ):
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        if images is not None:
            image_features = self.image_processor(
                images , return_image_mask=return_image_mask , return_codebook_pixels=return_codebook_pixels , return_tensors=return_tensors , **kwargs , )
        if text is not None and images is not None:
            encoding.update(image_features )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
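A usage sketch for the processor above, assuming the upstream `transformers` FLAVA checkpoint name; the dummy image is purely illustrative.

from PIL import Image
from transformers import FlavaProcessor

processor = FlavaProcessor.from_pretrained("facebook/flava-full")
inputs = processor(text=["a photo of a cat"] , images=[Image.new("RGB" , (224, 224) )] , return_tensors="pt")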
| 640
|
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key , default=False ):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"""If set, {key} must be yes or no.""" )
    return _value
_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
_run_remote_tests = parse_flag_from_env('''RUN_REMOTE''', default=False)
_run_local_tests = parse_flag_from_env('''RUN_LOCAL''', default=True)
_run_packaged_tests = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
UpperCAmelCase_ : List[str] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
UpperCAmelCase_ : Tuple = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
UpperCAmelCase_ : List[str] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
UpperCAmelCase_ : Dict = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
UpperCAmelCase_ : Union[str, Any] = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
UpperCAmelCase_ : str = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
UpperCAmelCase_ : Dict = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def _lowerCAmelCase(a : Optional[int] ) -> List[Any]:
try:
import faiss # noqa
except ImportError:
_SCREAMING_SNAKE_CASE =unittest.skip('''test requires faiss''' )(a )
return test_case
def _lowerCAmelCase(a : List[Any] ) -> List[Any]:
try:
import regex # noqa
except ImportError:
_SCREAMING_SNAKE_CASE =unittest.skip('''test requires regex''' )(a )
return test_case
def _lowerCAmelCase(a : Tuple ) -> List[Any]:
try:
import elasticsearch # noqa
except ImportError:
_SCREAMING_SNAKE_CASE =unittest.skip('''test requires elasticsearch''' )(a )
return test_case
def _lowerCAmelCase(a : Dict ) -> List[str]:
try:
import sqlalchemy # noqa
except ImportError:
_SCREAMING_SNAKE_CASE =unittest.skip('''test requires sqlalchemy''' )(a )
return test_case
def _lowerCAmelCase(a : List[Any] ) -> List[str]:
if not config.TORCH_AVAILABLE:
_SCREAMING_SNAKE_CASE =unittest.skip('''test requires PyTorch''' )(a )
return test_case
def _lowerCAmelCase(a : Optional[int] ) -> Optional[Any]:
if not config.TF_AVAILABLE:
_SCREAMING_SNAKE_CASE =unittest.skip('''test requires TensorFlow''' )(a )
return test_case
def _lowerCAmelCase(a : int ) -> List[Any]:
if not config.JAX_AVAILABLE:
_SCREAMING_SNAKE_CASE =unittest.skip('''test requires JAX''' )(a )
return test_case
def _lowerCAmelCase(a : Optional[Any] ) -> Optional[Any]:
if not config.PIL_AVAILABLE:
_SCREAMING_SNAKE_CASE =unittest.skip('''test requires Pillow''' )(a )
return test_case
def _lowerCAmelCase(a : str ) -> Any:
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(a )
else:
return test_case
def _lowerCAmelCase(a : List[Any] ) -> Union[str, Any]:
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(a )
else:
return test_case
def _lowerCAmelCase(a : Tuple ) -> str:
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(a )
else:
return test_case
def _lowerCAmelCase(a : List[str] ) -> List[str]:
def _require_spacy_model(a : int ):
try:
import spacy # noqa F401
spacy.load(a )
except ImportError:
return unittest.skip('''test requires spacy''' )(a )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(a ) )(a )
else:
return test_case
return _require_spacy_model
def _lowerCAmelCase(a : Optional[Any] ) -> int:
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(a )
else:
return test_case
def _lowerCAmelCase(a : Tuple ) -> Optional[Any]:
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(a )
else:
return test_case
def _lowerCAmelCase(a : Optional[Any] ) -> int:
if not _run_slow_tests or _run_slow_tests == 0:
_SCREAMING_SNAKE_CASE =unittest.skip('''test is slow''' )(a )
return test_case
def _lowerCAmelCase(a : int ) -> Optional[int]:
if not _run_local_tests or _run_local_tests == 0:
_SCREAMING_SNAKE_CASE =unittest.skip('''test is local''' )(a )
return test_case
def _lowerCAmelCase(a : List[str] ) -> Union[str, Any]:
if not _run_packaged_tests or _run_packaged_tests == 0:
_SCREAMING_SNAKE_CASE =unittest.skip('''test is packaged''' )(a )
return test_case
def _lowerCAmelCase(a : Optional[int] ) -> Union[str, Any]:
if not _run_remote_tests or _run_remote_tests == 0:
_SCREAMING_SNAKE_CASE =unittest.skip('''test requires remote''' )(a )
return test_case
def _lowerCAmelCase(*a : str ) -> str:
def decorate(cls : Any ):
for name, fn in cls.__dict__.items():
if callable(a ) and name.startswith('''test''' ):
for decorator in decorators:
_SCREAMING_SNAKE_CASE =decorator(a )
setattr(cls , a , a )
return cls
return decorate
class RequestWouldHangIndefinitelyError( Exception ):
    '''simple docstring'''
    pass
class OfflineSimulationMode( Enum ):
    '''simple docstring'''
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS , timeout=1E-16 ):
    online_request = requests.Session().request
    def timeout_request(session , method , url , **kwargs ):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = '''https://10.255.255.1'''
        if kwargs.get('''timeout''' ) is None:
            raise RequestWouldHangIndefinitelyError(
                f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
        kwargs['''timeout'''] = timeout
        try:
            return online_request(method , invalid_url , **kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('''10.255.255.1''' , f"""OfflineMock[{url}]""" ),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session , prepared_request , **kwargs ):
        raise requests.ConnectionError('''Offline mode is enabled.''' , request=prepared_request )
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('''requests.Session.send''' , raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('''requests.Session.request''' , timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('''datasets.config.HF_DATASETS_OFFLINE''' , True ):
            yield
    else:
        raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
@contextmanager
def _lowerCAmelCase(*a : str , **a : Tuple ) -> List[Any]:
_SCREAMING_SNAKE_CASE =str(Path().resolve() )
with tempfile.TemporaryDirectory(*a , **a ) as tmp_dir:
try:
os.chdir(a )
yield
finally:
os.chdir(a )
@contextmanager
def _lowerCAmelCase() -> int:
import gc
gc.collect()
_SCREAMING_SNAKE_CASE =pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _lowerCAmelCase() -> Union[str, Any]:
import gc
gc.collect()
_SCREAMING_SNAKE_CASE =pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _lowerCAmelCase(a : int , a : Tuple ) -> List[Any]:
return deepcopy(a ).integers(0 , 100 , 10 ).tolist() == deepcopy(a ).integers(0 , 100 , 10 ).tolist()
def _lowerCAmelCase(a : Tuple ) -> List[Any]:
import decorator
from requests.exceptions import HTTPError
def _wrapper(a : Any , *a : Any , **a : Tuple ):
try:
return func(*a , **a )
except HTTPError as err:
if str(a ).startswith('''500''' ) or str(a ).startswith('''502''' ):
pytest.xfail(str(a ) )
raise err
return decorator.decorator(_wrapper , a )
class _RunOutput:
    '''simple docstring'''
    def __init__( self , returncode , stdout , stderr ):
        '''simple docstring'''
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _lowerCAmelCase(a : str , a : List[str] ) -> Optional[int]:
while True:
_SCREAMING_SNAKE_CASE =await stream.readline()
if line:
callback(a )
else:
break
async def _lowerCAmelCase(a : Union[str, Any] , a : int=None , a : int=None , a : Any=None , a : Any=False , a : int=False ) -> _RunOutput:
if echo:
print('''\nRunning: ''' , ''' '''.join(a ) )
_SCREAMING_SNAKE_CASE =await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=a , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=a , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
def tee(a : Optional[int] , a : List[str] , a : Optional[Any] , a : Any="" ):
_SCREAMING_SNAKE_CASE =line.decode('''utf-8''' ).rstrip()
sink.append(a )
if not quiet:
print(a , a , file=a )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda a : tee(a , a , sys.stdout , label='''stdout:''' ) ),
_read_stream(p.stderr , lambda a : tee(a , a , sys.stderr , label='''stderr:''' ) ),
] , timeout=a , )
return _RunOutput(await p.wait() , a , a )
def _lowerCAmelCase(a : Tuple , a : List[str]=None , a : Dict=None , a : int=180 , a : List[str]=False , a : Dict=True ) -> _RunOutput:
_SCREAMING_SNAKE_CASE =asyncio.get_event_loop()
_SCREAMING_SNAKE_CASE =loop.run_until_complete(
_stream_subprocess(a , env=a , stdin=a , timeout=a , quiet=a , echo=a ) )
_SCREAMING_SNAKE_CASE =''' '''.join(a )
if result.returncode > 0:
_SCREAMING_SNAKE_CASE ='''\n'''.join(result.stderr )
raise RuntimeError(
f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
f"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"""'{cmd_str}' produced no output.""" )
return result
def _lowerCAmelCase() -> Optional[Any]:
_SCREAMING_SNAKE_CASE =os.environ.get('''PYTEST_XDIST_WORKER''' , '''gw0''' )
_SCREAMING_SNAKE_CASE =re.sub(R'''^gw''' , '''''' , a , 0 , re.M )
return int(a )
def _lowerCAmelCase() -> Union[str, Any]:
_SCREAMING_SNAKE_CASE =2_9500
_SCREAMING_SNAKE_CASE =pytest_xdist_worker_id()
return port + uniq_delta
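A sketch of how a test might use the offline-simulation context manager defined above (the helper name follows the de-obfuscated definition; the test body is illustrative).

def test_fails_without_network():
    with offline(OfflineSimulationMode.CONNECTION_FAILS ):
        with pytest.raises(requests.ConnectionError ):
            requests.get('''https://huggingface.co''' )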
| 255
| 0
|
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum sum over all contiguous subarrays of `arr` (Kadane's algorithm, O(n))."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # either extend the running subarray or start a new one at `num`
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
| 713
|
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")

    max_number = 1
    i = 2
    # trial division: strip each factor completely, so every factor recorded is prime
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        # whatever remains above the square root of the original n is itself prime
        max_number = n
    return int(max_number)


if __name__ == "__main__":
    print(f"{solution() = }")
| 469
| 0
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        # 1. Normalize and project per-frame features into the transformer width
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 2. Define transformers blocks (attention runs along the temporal axis)
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        # 3. Project back to the input channel count
        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True):
        # 1. Input: fold (batch * frames, C, H, W) into a per-pixel temporal sequence
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks: each spatial location attends across its frames
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output: unfold back to (batch * frames, C, H, W) and add the residual
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
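# Hedged smoke test (an addition, not part of the original module): checks the
# fold/unfold shape logic of TransformerTemporalModel with tiny dimensions.
if __name__ == "__main__":
    model = TransformerTemporalModel(
        num_attention_heads=2, attention_head_dim=8, in_channels=32, norm_num_groups=8, num_layers=1
    )
    frames = torch.randn(2 * 5, 32, 8, 8)  # batch_size=2, num_frames=5, 32 channels, 8x8 spatial
    out = model(frames, num_frames=5)
    assert out.sample.shape == (10, 32, 8, 8)  # the residual connection preserves the input shape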
| 686
|
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )

        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )

        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 42
| 0
|
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
lowerCAmelCase_ : Optional[Any] = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
lowerCAmelCase_ : Tuple = (
subprocess.check_output(F'git diff --diff-filter=d --name-only {fork_point_sha}'.split()).decode('''utf-8''').split()
)
lowerCAmelCase_ : Any = '''|'''.join(sys.argv[1:])
lowerCAmelCase_ : Optional[Any] = re.compile(RF'^({joined_dirs}).*?\.py$')
lowerCAmelCase_ : List[Any] = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
| 378
|
"""simple docstring"""
import random
from typing import Any
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
for _ in range(len(lowerCAmelCase ) ):
UpperCAmelCase = random.randint(0 , len(lowerCAmelCase ) - 1 )
UpperCAmelCase = random.randint(0 , len(lowerCAmelCase ) - 1 )
UpperCAmelCase , UpperCAmelCase = data[b], data[a]
return data
if __name__ == "__main__":
lowerCAmelCase_ : Tuple = [0, 1, 2, 3, 4, 5, 6, 7]
lowerCAmelCase_ : List[str] = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
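# Hedged reference sketch (an addition, not in the original): the canonical
# Durstenfeld formulation of Fisher-Yates, which yields a uniformly random permutation.
def fisher_yates_shuffle_canonical(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # choose from the not-yet-fixed prefix [0, i]
        data[i], data[j] = data[j], data[i]
    return data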
| 378
| 1
|
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Return the denominator d <= `digit` whose unit fraction numerator/d has the
    longest recurring cycle in its decimal expansion (Project Euler problem 26)."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            # once a remainder repeats, the cycle length equals the number of
            # distinct remainders collected so far
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
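# Hedged usage note (an addition, not in the original): with the defaults this
# scans denominators up to 1000; 1/983 has the longest recurring cycle (982
# digits), so solution() returns 983 -- the known Project Euler 26 answer.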
| 251
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 251
| 1
|
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedy change-making: repeatedly take the largest denomination that still fits."""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Take this denomination as long as it fits
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append to the "answer" array

    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) <= 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
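# Hedged caveat (an addition, not in the original): greedy change-making is only
# optimal for canonical coin systems such as the Indian denominations above.
# Counterexample: find_minimum_change([1, 3, 4], "6") -> [4, 1, 1] (three coins),
# while the optimum is [3, 3] (two coins).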
| 715
|
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test: return True iff the Mersenne number 2^p - 1 is prime
    (p itself must be a prime >= 2 for the test to apply)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1  # the Mersenne number 2^p - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
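# Hedged worked check (an addition, not in the original): 2^7 - 1 = 127 is
# prime, so the first print is True; 2^11 - 1 = 2047 = 23 * 89, so the second
# print is False even though the exponent 11 is prime.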
| 502
| 0