"""Utilities for chunking large tensor operations (adapted from OpenFold)."""
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union

import torch

from .tensor_utils import tensor_tree_map, tree_map


def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))


@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start and end are inclusive
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices


@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])


def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out


class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
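
# A minimal usage sketch, not part of the original module: chunk_layer applies a
# layer over the flattened batch dimensions in fixed-size chunks. The toy layer
# and tensor shapes below are illustrative assumptions.
#
#   import torch
#
#   def toy_layer(x: torch.Tensor) -> torch.Tensor:
#       return x * 2.0
#
#   inputs = {"x": torch.randn(4, 8, 16)}  # two batch dims: (4, 8)
#   out = chunk_layer(toy_layer, inputs, chunk_size=8, no_batch_dims=2)
#   assert out.shape == (4, 8, 16)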
"""Tests for the DDIM diffusion pipeline."""
import unittest

import numpy as np
import torch

from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # attribute name reconstructed; only the value survives in the source

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
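
# A minimal inference sketch, not part of the test file above; it assumes
# network access to the same "google/ddpm-cifar10-32" checkpoint the
# integration test uses.
#
#   from diffusers import DDIMPipeline
#
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(num_inference_steps=50).images[0]  # PIL.Image by default
#   image.save("ddim_sample.png")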
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def a__ ( snake_case__ ) -> None:
lowerCamelCase , lowerCamelCase = analyze_text(snake_case__ )
lowerCamelCase = list(""" """ + ascii_lowercase )
# what is our total sum of probabilities.
lowerCamelCase = sum(single_char_strings.values() )
# one length string
lowerCamelCase = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
lowerCamelCase = single_char_strings[ch]
lowerCamelCase = my_str / all_sum
my_fir_sum += prob * math.loga(snake_case__ ) # entropy formula.
# print entropy
print(F'{round(-1 * my_fir_sum ):.1f}' )
# two len string
lowerCamelCase = sum(two_char_strings.values() )
lowerCamelCase = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
lowerCamelCase = cha + cha
if sequence in two_char_strings:
lowerCamelCase = two_char_strings[sequence]
lowerCamelCase = int(snake_case__ ) / all_sum
my_sec_sum += prob * math.loga(snake_case__ )
# print second entropy
print(F'{round(-1 * my_sec_sum ):.1f}' )
# print the difference between them
print(F'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def a__ ( snake_case__ ) -> tuple[dict, dict]:
lowerCamelCase = Counter() # type: ignore
lowerCamelCase = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(snake_case__ ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def a__ ( ) -> int:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
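
# Worked example (illustrative, not from the original file): for the text "aa",
# p('a') = 1, so the first-order entropy is -1 * log2(1) = 0.0 bits.
#
#   >>> analyze_text("aa")
#   (Counter({'a': 2}), Counter({' a': 1, 'aa': 1}))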
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
@property
def _lowerCAmelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.dummy_uncond_unet
lowerCamelCase = ScoreSdeVeScheduler()
lowerCamelCase = ScoreSdeVePipeline(unet=_a , scheduler=_a )
sde_ve.to(_a )
sde_ve.set_progress_bar_config(disable=_a )
lowerCamelCase = torch.manual_seed(0 )
lowerCamelCase = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=_a ).images
lowerCamelCase = torch.manual_seed(0 )
lowerCamelCase = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=_a , return_dict=_a )[
0
]
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = """google/ncsnpp-church-256"""
lowerCamelCase = UNetaDModel.from_pretrained(_a )
lowerCamelCase = ScoreSdeVeScheduler.from_pretrained(_a )
lowerCamelCase = ScoreSdeVePipeline(unet=_a , scheduler=_a )
sde_ve.to(_a )
sde_ve.set_progress_bar_config(disable=_a )
lowerCamelCase = torch.manual_seed(0 )
lowerCamelCase = sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=_a ).images
lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCamelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
"""Tests for the TensorFlow DeBERTa-v2 model."""
from __future__ import annotations

import unittest

from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )


class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDeBERTaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
"""Check whether some subset of an array sums to a required value (dynamic programming)."""


def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """
    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
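
if __name__ == "__main__":
    # Worked example (illustrative, not from the original file): the subset
    # {4, 5} sums to 9, while no subset of this array sums to 30.
    example = [3, 34, 4, 12, 5, 2]
    print(is_sum_subset(example, 9))  # True
    print(is_sum_subset(example, 30))  # False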
"""Integration test for the TensorFlow CamemBERT model."""
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
"""Jacobi iteration method for a strictly diagonally dominant system of linear equations."""
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
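
# A usage sketch with illustrative values (not from the original file): the
# coefficient matrix below is strictly diagonally dominant, so the iteration
# is well defined.
if __name__ == "__main__":
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], iterations=3))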
"""Convert a lowercase string to the 1-26 alphabet positions of its letters (a1z26), and back."""
from __future__ import annotations


def encode(plain: str) -> list[int]:
    """
    >>> encode("myname")
    [13, 25, 14, 1, 13, 5]
    """
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """
    >>> decode([13, 25, 14, 1, 13, 5])
    'myname'
    """
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
"""Least recently used (LRU) cache backed by a doubly linked list."""
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """Doubly linked list node built specifically for the LRU cache."""

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Doubly linked list with sentinel head and rear nodes."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Add the given node to the end of the list (before rear)."""
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Remove and return the given node; return None if it is not linked."""
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU cache that stores up to a given capacity of key/value pairs."""

    # class variable mapping decorated functions to their cache instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Return the value for key and mark the node as most recently used."""
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Set the value for key, evicting the least recently used entry if needed."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1

        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(
        cls, size: int = 128
    ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator version of the LRU cache."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
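
# A usage sketch for the decorator form (illustrative, not from the original
# file): repeated sub-calls with the same argument are served from the cache.
if __name__ == "__main__":

    @LRUCache.decorator(100)
    def fib(num: int) -> int:
        if num in (1, 2):
            return 1
        return fib(num - 1) + fib(num - 2)

    print(fib(20))  # 6765
    print(fib.cache_info())  # CacheInfo(...) from the shared LRUCache instance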
"""Project Euler problem 16: sum of the digits of 2**1000."""


def solution(power: int = 1000) -> int:
    """
    >>> solution(15)
    26
    """
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
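
# Trace of the digit-sum loop (illustrative): for n = 345, r accumulates
# 5, then 9, then 12, so the digit sum of 345 is 12.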
"""Prepare whole-word-masking references for Chinese text using LTP and a BERT tokenizer."""
import argparse
import json
from typing import List

from ltp import LTP

from transformers import BertTokenizer


def _is_chinese_char(cp):
    """Checks whether cp is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True

    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()

    main(args)
"""Generate all permutations of a sequence via backtracking."""
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
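
# Note (illustrative): the state space tree has n! leaves, one per permutation;
# e.g. generate_all_permutations(["A", "B"]) prints ['A', 'B'] and then ['B', 'A'].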
"""Lazy import structure for the TAPEX tokenizer."""
from typing import TYPE_CHECKING

from ....utils import _LazyModule


_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""Processor class for OwlViT: wraps an image processor and a CLIP tokenizer."""
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
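
# A usage sketch, assuming the public "google/owlvit-base-patch32" checkpoint
# and a sample COCO image; not part of the original module.
#
#   import requests
#   from PIL import Image
#   from transformers import OwlViTProcessor
#
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#   image = Image.open(requests.get(url, stream=True).raw)
#   inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="pt")
#   # inputs now holds input_ids, attention_mask and pixel_values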
"""Project Euler problem 180: rational zeros of a function of three variables."""
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    """Check if number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions and return the sum in reduced form."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
"""Run doctest over transformers modules and documentation files."""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow


logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Run the doctests in the files of `directory` selected by the identifiers."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, List):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : Tuple = {
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        """simple docstring"""
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
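# Minimal usage sketch (not part of the original file): all defaults come from the
# signature above, and `hidden_size` resolves to `d_model` through `attribute_map`.
#
#     config = AutoformerConfig(prediction_length=24)
#     assert config.hidden_size == config.d_model == 64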
| 69
| 1
|
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
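# Illustrative call (assumption: the package is pinned in dependency_versions_table.py,
# as it is for the runtime-checked packages listed above):
#
#     dep_version_check("tqdm")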
| 141
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 141
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class Swin2SRConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 16
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 265
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"""
),
"""distilbert-base-uncased-finetuned-sst-2-english""": (
"""https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"""
),
}
class DistilBertConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 648
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """simple docstring"""

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail'):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f'''This example is {label}''' for label in labels], return_tensors='pt', padding='max_length', )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
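# Hedged usage sketch (not from the original file): PipelineTool instances are callable,
# so classifying a sentence against candidate labels should look roughly like this.
#
#     tool = TextClassificationTool()
#     tool("This movie was fantastic", ["positive", "negative"])  # likely "positive"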
| 648
| 1
|
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    """simple docstring"""
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            # the prefix must be stripped so the sub-model can load the weights
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
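# Illustrative CLI invocation (the script filename below is hypothetical; the three
# flags are the ones defined by the argparse setup above):
#
#     python convert_ldm_original.py --checkpoint_path model.ckpt --config_path config.yaml --output_path ./ldm-pipeline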
| 398
|
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return TaConfig.from_pretrained('google/umt5-base')
    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
return TaConfig(
vocab_size=166 ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
    def get_config(self):
return TaConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)['last_hidden_state']
        output_from_past = model(next_tokens, past_key_values=past_key_values)['last_hidden_state']

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)['last_hidden_state']
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': UMTaForConditionalGeneration,
            'feature-extraction': UMTaModel,
            'summarization': UMTaForConditionalGeneration,
            'text2text-generation': UMTaForConditionalGeneration,
            'translation': UMTaForConditionalGeneration,
            'question-answering': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    # flag names below are reconstructed from the standard ModelTesterMixin attributes;
    # the boolean values are the ones preserved from the original file
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMTaModelTester(self)
    @unittest.skip('Test has a segmentation fault on torch 1.8.0')
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"""{tmpdirname}/t5_test.onnx""",
                export_params=True,
                opset_version=9,
                input_names=['input_ids', 'decoder_input_ids'],
            )
    @unittest.skipIf(torch_device == 'cpu', 'Cant do half precision')
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_headmasking(self):
        attention_names = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            'head_mask': torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            'decoder_head_mask': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            'cross_attn_head_mask': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks['decoder_head_mask'] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]['input_ids'],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
    @unittest.skip('Does not work on the tiny model as we keep hitting edge cases.')
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaModelIntegrationTests(unittest.TestCase):
"""simple docstring"""
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
    def test_small_integration_test(self):
        model = UMTaForConditionalGeneration.from_pretrained('google/umt5-small', return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/umt5-small', use_fast=False, legacy=False)
        input_text = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
        input_ids = tokenizer(input_text, return_tensors='pt', padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 67
| 0
|
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    '''simple docstring'''

    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ['hidden_size', 'num_attention_heads', 'num_hidden_layers']
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(['vocab_size'])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"""`{prop}` does not exist""")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"""`{name} value {idx} expected, but was {getattr(config, name)}""")
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"""`{name} value {idx} expected, but was {getattr(config, name)}""")
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, '''config.json''')
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = '''test'''

        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(('''torch_dtype''', config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = '''\n'''.join([f"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values])
            raise ValueError(f"""The following keys were not properly set in the config:\n{errors}""")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
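# Typical usage inside a model test suite (sketch; `BertConfig` is only an illustrative
# config class here, and any extra kwargs become `inputs_dict`):
#
#     tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#     tester.run_common_tests()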
| 704
|
import torch
from diffusers import StableDiffusionPipeline
UpperCAmelCase_ ="""path-to-your-trained-model"""
UpperCAmelCase_ =StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")
UpperCAmelCase_ ="""A photo of sks dog in a bucket"""
UpperCAmelCase_ =pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
| 33
| 0
|
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    """simple docstring"""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared
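# Quick sanity check (illustrative, not part of the original solution):
# next_number(86) == 8**2 + 6**2 == 100, and next_number(100) == 1,
# so 86 belongs to the chain that terminates at 1.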
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10000000
CHAINS[0] = True  # the chain starting at 1
CHAINS[57] = False  # the chain starting at 58, which ends at 89
def chain(number: int) -> bool:
    """simple docstring"""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    """simple docstring"""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
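# Small-scale check (illustrative): among the first 100 starting numbers there are
# 20 happy numbers (1, 7, 10, 13, 19, ..., 97, 100) whose chains reach 1, so the
# remaining 80 arrive at 89 and solution(100) should return 80.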
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 1
|
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 503
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowercase = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"""SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwinForImageClassification""",
"""SwinForMaskedImageModeling""",
"""SwinModel""",
"""SwinPreTrainedModel""",
"""SwinBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"""TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSwinForImageClassification""",
"""TFSwinForMaskedImageModeling""",
"""TFSwinModel""",
"""TFSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 427
|
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class AcceleratorTester(AccelerateTestCase):
@require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        _ = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)
    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)
    def test_env_var_device(self):
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch('torch.cuda.set_device', noop), patch_environment(ACCELERATE_TORCH_DEVICE='cuda:64'):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), 'cuda:64')
    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1E-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1E-3)
    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {'class_name': models[0].__class__.__name__}

            with open(os.path.join(output_dir, 'data.json'), 'w') as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, 'data.json'), 'r') as f:
                config = json.load(f)

            models[0].class_name = config['class_name']

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1E-3)

            # random class name to verify correct one is loaded
            model.class_name = 'random'

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1E-3)

            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1E-3)

            # random class name to verify correct one is loaded
            model.class_name = 'random'

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1E-3)

            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)
    def test_accelerator_none(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)
    def test_is_accelerator_prepared(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, '_is_accelerate_prepared', False),
            False,
            'Dummy object should have `_is_accelerate_prepared` set to `True`',
        )
        self.assertEqual(
            getattr(model, '_is_accelerate_prepared', False),
            True,
            'Model is missing `_is_accelerator_prepared` or is set to `False`',
        )
        self.assertEqual(
            getattr(optimizer, '_is_accelerate_prepared', False),
            True,
            'Optimizer is missing `_is_accelerator_prepared` or is set to `False`',
        )
        self.assertEqual(
            getattr(scheduler, '_is_accelerate_prepared', False),
            True,
            'Scheduler is missing `_is_accelerator_prepared` or is set to `False`',
        )
        self.assertEqual(
            getattr(train_dl, '_is_accelerate_prepared', False),
            True,
            'Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`',
        )
        self.assertEqual(
            getattr(valid_dl, '_is_accelerate_prepared', False),
            True,
            'Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`',
        )
@slow
@require_bnb
    def test_accelerator_bnb(self):
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            'EleutherAI/gpt-neo-125m', load_in_8bit=True, device_map={'': 0}, )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
@slow
@require_bnb
    def test_accelerator_bnb_cpu_error(self):
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                'EleutherAI/gpt-neo-125m', )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map['lm_head'] = 'cpu'
        model = AutoModelForCausalLM.from_pretrained(
            'EleutherAI/gpt-neo-125m', device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True)

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)
@slow
@require_bnb
@require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {'distributed_type': DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                'EleutherAI/gpt-neo-125m', )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map['lm_head'] = 1
        model = AutoModelForCausalLM.from_pretrained(
            'EleutherAI/gpt-neo-125m', load_in_8bit=True, device_map=device_map, )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)
        PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                'EleutherAI/gpt-neo-125m', )
        device_map = infer_auto_device_map(model)
        device_map['lm_head'] = 1
        model = AutoModelForCausalLM.from_pretrained(
            'EleutherAI/gpt-neo-125m', load_in_8bit=True, device_map=device_map, )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
@require_cuda
    def test_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(model)
| 427
| 1
|
def solution(n: int = 1000) -> int:
    """simple docstring"""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
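# e.g. solution(10) == 23, since the multiples of 3 or 5 below 10 are 3, 5, 6 and 9.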
if __name__ == "__main__":
print(F'''{solution() = }''')
| 87
|
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(
    lat1: float, lon1: float, lat2: float, lon2: float
) -> float:
    """simple docstring"""
    # Equation parameters
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_demonimator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_demonimator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
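# Rough sanity check (illustrative coordinates for San Francisco and Yosemite; the
# straight-line distance between them is roughly 254 km, i.e. about 254,000 m):
#
#     lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521)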
if __name__ == "__main__":
import doctest
doctest.testmod()
| 87
| 1
|
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(
    resistance: float, reactance: float, impedance: float
) -> dict[str, float]:
    '''simple docstring'''
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError('Exactly one argument must be 0')
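# Worked examples (illustrative, based on a 3-4-5 impedance triangle):
#     electrical_impedance(3, 4, 0)  # -> {"impedance": 5.0}
#     electrical_impedance(0, 4, 5)  # -> {"resistance": 3.0}
#     electrical_impedance(3, 0, 5)  # -> {"reactance": 4.0}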
if __name__ == "__main__":
import doctest
doctest.testmod()
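# A minimal restatement of the impedance-triangle relation this function
# encodes (names here are my own): |Z|^2 = R^2 + X^2, so any one quantity
# follows from the other two, which is why exactly one argument must be 0.
def solve_for_impedance(resistance: float, reactance: float) -> float:
    return sqrt(pow(resistance, 2) + pow(reactance, 2))

assert solve_for_impedance(3.0, 4.0) == 5.0  # the classic 3-4-5 triangle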
| 145
|
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def UpperCamelCase ( ) -> None:
'''simple docstring'''
lowercase =input('''Enter message: ''' )
lowercase =input('''Enter key [alphanumeric]: ''' )
lowercase =input('''Encrypt/Decrypt [e/d]: ''' )
if mode.lower().startswith('''e''' ):
lowercase ='''encrypt'''
lowercase =encrypt_message(lowercase_ , lowercase_ )
elif mode.lower().startswith('''d''' ):
lowercase ='''decrypt'''
lowercase =decrypt_message(lowercase_ , lowercase_ )
print(f'\n{mode.title()}ed message:' )
print(lowercase_ )
def UpperCamelCase ( lowercase_ : str , lowercase_ : str ) -> str:
'''simple docstring'''
return translate_message(lowercase_ , lowercase_ , '''encrypt''' )
def UpperCamelCase ( lowercase_ : str , lowercase_ : str ) -> str:
'''simple docstring'''
return translate_message(lowercase_ , lowercase_ , '''decrypt''' )
def UpperCamelCase ( lowercase_ : str , lowercase_ : str , lowercase_ : str ) -> str:
'''simple docstring'''
lowercase =[]
lowercase =0
lowercase =key.upper()
for symbol in message:
lowercase =LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(lowercase_ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(lowercase_ ):
lowercase =0
else:
translated.append(lowercase_ )
return "".join(lowercase_ )
if __name__ == "__main__":
main()
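# A compact round-trip sketch of the Vigenère scheme above (written with my
# own identifiers, since the snippet's are mangled): shifting each letter
# forward by the key letter's index and back again must reproduce the input.
ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

def vigenere(message: str, key: str, decrypt: bool = False) -> str:
    out, key_index = [], 0
    for symbol in message:
        num = ALPHABET.find(symbol.upper())
        if num == -1:
            out.append(symbol)  # non-letters pass through and do not advance the key
            continue
        shift = ALPHABET.find(key[key_index].upper())
        num = (num - shift if decrypt else num + shift) % len(ALPHABET)
        out.append(ALPHABET[num] if symbol.isupper() else ALPHABET[num].lower())
        key_index = (key_index + 1) % len(key)
    return "".join(out)

assert vigenere(vigenere("Attack at dawn", "LEMON"), "LEMON", decrypt=True) == "Attack at dawn"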
| 145
| 1
|
'''simple docstring'''
def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int ) -> str:
"""simple docstring"""
UpperCAmelCase_ : list[list[str]] = [[] for _ in range(_SCREAMING_SNAKE_CASE )]
UpperCAmelCase_ : Any = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1 or len(_SCREAMING_SNAKE_CASE ) <= key:
return input_string
for position, character in enumerate(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Union[str, Any] = position % (lowest * 2) # puts it in bounds
UpperCAmelCase_ : Optional[Any] = min(_SCREAMING_SNAKE_CASE , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = ["".join(_SCREAMING_SNAKE_CASE ) for row in temp_grid]
UpperCAmelCase_ : int = "".join(_SCREAMING_SNAKE_CASE )
return output_string
def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : Optional[Any] = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1:
return input_string
UpperCAmelCase_ : list[list[str]] = [[] for _ in range(_SCREAMING_SNAKE_CASE )] # generates template
for position in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase_ : Union[str, Any] = position % (lowest * 2) # puts it in bounds
UpperCAmelCase_ : int = min(_SCREAMING_SNAKE_CASE , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("*" )
UpperCAmelCase_ : str = 0
for row in temp_grid: # fills in the characters
UpperCAmelCase_ : List[Any] = input_string[counter : counter + len(_SCREAMING_SNAKE_CASE )]
grid.append(list(_SCREAMING_SNAKE_CASE ) )
counter += len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = "" # reads as zigzag
for position in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase_ : Union[str, Any] = position % (lowest * 2) # puts it in bounds
UpperCAmelCase_ : Optional[Any] = min(_SCREAMING_SNAKE_CASE , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def a__ ( _SCREAMING_SNAKE_CASE : str ) -> dict[int, str]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = {}
for key_guess in range(1 , len(_SCREAMING_SNAKE_CASE ) ): # tries every key
UpperCAmelCase_ : Any = decrypt(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
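# A compact check of the zigzag traversal both functions above rely on (my own
# minimal restatement; the snippet's identifiers are mangled): characters fall
# onto `key` rails in a zigzag, and the ciphertext reads the rails in order.
def rail_rows(length: int, key: int) -> list[int]:
    lowest = key - 1
    rows = []
    for position in range(length):
        num = position % (lowest * 2)  # puts it in bounds
        rows.append(min(num, lowest * 2 - num))  # creates zigzag pattern
    return rows

def rail_encrypt(text: str, key: int) -> str:
    rails: list[list[str]] = [[] for _ in range(key)]
    for ch, row in zip(text, rail_rows(len(text), key)):
        rails[row].append(ch)
    return "".join("".join(rail) for rail in rails)

assert rail_encrypt("WEAREDISCOVEREDFLEEATONCE", 3) == "WECRLTEERDSOEEFEAOCAIVDEN"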
| 71
|
from abc import ABC, abstractmethod
from typing import List, Optional
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : List[Any] ):
# sanity-check that the constraint subclass is defined consistently
self.test()
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Union[str, Any] = 0
lowerCamelCase :Tuple = False
while not completed:
if counter == 1:
self.reset()
lowerCamelCase :Dict = self.advance()
if not self.does_advance(__snake_case ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
lowerCamelCase , lowerCamelCase , lowerCamelCase :str = self.update(__snake_case )
counter += 1
if counter > 10000:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def snake_case ( self : Any ):
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def snake_case ( self : Dict , __snake_case : int ):
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def snake_case ( self : Tuple , __snake_case : int ):
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def snake_case ( self : str ):
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def snake_case ( self : Optional[Any] ):
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def snake_case ( self : List[Any] , __snake_case : List[str]=False ):
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : Any , __snake_case : List[int] ):
super(__snake_case , self ).__init__()
if not isinstance(__snake_case , __snake_case ) or len(__snake_case ) == 0:
raise ValueError(F"`token_ids` has to be a non-empty list, but is {token_ids}." )
if any((not isinstance(__snake_case , __snake_case ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}." )
lowerCamelCase :Union[str, Any] = token_ids
lowerCamelCase :Optional[int] = len(self.token_ids )
lowerCamelCase :Dict = -1 # the index of the currently fulfilled step
lowerCamelCase :Any = False
def snake_case ( self : Any ):
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def snake_case ( self : Dict , __snake_case : int ):
if not isinstance(__snake_case , __snake_case ):
raise ValueError(F"`token_id` has to be an `int`, but is {token_id} of type {type(__snake_case )}" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def snake_case ( self : Union[str, Any] , __snake_case : int ):
if not isinstance(__snake_case , __snake_case ):
raise ValueError(F"`token_id` has to be an `int`, but is {token_id} of type {type(__snake_case )}" )
lowerCamelCase :Union[str, Any] = False
lowerCamelCase :Dict = False
lowerCamelCase :Tuple = False
if self.does_advance(__snake_case ):
self.fulfilled_idx += 1
lowerCamelCase :str = True
if self.fulfilled_idx == (self.seqlen - 1):
lowerCamelCase :Optional[Any] = True
lowerCamelCase :Dict = completed
else:
# failed to make progress.
lowerCamelCase :Any = True
self.reset()
return stepped, completed, reset
def snake_case ( self : List[str] ):
lowerCamelCase :List[str] = False
lowerCamelCase :List[str] = 0
def snake_case ( self : List[str] ):
return self.seqlen - (self.fulfilled_idx + 1)
def snake_case ( self : Union[str, Any] , __snake_case : Optional[int]=False ):
lowerCamelCase :Dict = PhrasalConstraint(self.token_ids )
if stateful:
lowerCamelCase :Any = self.seqlen
lowerCamelCase :str = self.fulfilled_idx
lowerCamelCase :Optional[int] = self.completed
return new_constraint
class _lowerCAmelCase :
def __init__( self : Optional[int] , __snake_case : List[List[int]] , __snake_case : Optional[Any]=True ):
lowerCamelCase :int = max([len(__snake_case ) for one in nested_token_ids] )
lowerCamelCase :Optional[Any] = {}
for token_ids in nested_token_ids:
lowerCamelCase :Tuple = root
for tidx, token_id in enumerate(__snake_case ):
if token_id not in level:
lowerCamelCase :List[str] = {}
lowerCamelCase :List[str] = level[token_id]
if no_subsets and self.has_subsets(__snake_case , __snake_case ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
F" {nested_token_ids}." )
lowerCamelCase :Optional[Any] = root
def snake_case ( self : List[Any] , __snake_case : List[str] ):
lowerCamelCase :Tuple = self.trie
for current_token in current_seq:
lowerCamelCase :Tuple = start[current_token]
lowerCamelCase :Union[str, Any] = list(start.keys() )
return next_tokens
def snake_case ( self : Optional[Any] , __snake_case : List[Any] ):
lowerCamelCase :List[Any] = self.next_tokens(__snake_case )
return len(__snake_case ) == 0
def snake_case ( self : Optional[int] , __snake_case : Optional[int] ):
lowerCamelCase :Optional[Any] = list(root.values() )
if len(__snake_case ) == 0:
return 1
else:
return sum([self.count_leaves(__snake_case ) for nn in next_nodes] )
def snake_case ( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Any ):
lowerCamelCase :Tuple = self.count_leaves(__snake_case )
return len(__snake_case ) != leaf_count
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : int , __snake_case : List[List[int]] ):
super(__snake_case , self ).__init__()
if not isinstance(__snake_case , __snake_case ) or len(__snake_case ) == 0:
raise ValueError(F"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}." )
if any(not isinstance(__snake_case , __snake_case ) for token_ids in nested_token_ids ):
raise ValueError(F"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}." )
if any(
any((not isinstance(__snake_case , __snake_case ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}." )
lowerCamelCase :int = DisjunctiveTrie(__snake_case )
lowerCamelCase :Union[str, Any] = nested_token_ids
lowerCamelCase :Dict = self.trie.max_height
lowerCamelCase :Any = []
lowerCamelCase :Dict = False
def snake_case ( self : Optional[int] ):
lowerCamelCase :Optional[int] = self.trie.next_tokens(self.current_seq )
if len(__snake_case ) == 0:
return None
else:
return token_list
def snake_case ( self : str , __snake_case : int ):
if not isinstance(__snake_case , __snake_case ):
raise ValueError(F"`token_id` is supposed to be type `int`, but is {token_id} of type {type(__snake_case )}" )
lowerCamelCase :Optional[Any] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def snake_case ( self : Optional[Any] , __snake_case : int ):
if not isinstance(__snake_case , __snake_case ):
raise ValueError(F"`token_id` is supposed to be type `int`, but is {token_id} of type {type(__snake_case )}" )
lowerCamelCase :Tuple = False
lowerCamelCase :Optional[int] = False
lowerCamelCase :Union[str, Any] = False
if self.does_advance(__snake_case ):
self.current_seq.append(__snake_case )
lowerCamelCase :List[Any] = True
else:
lowerCamelCase :List[str] = True
self.reset()
lowerCamelCase :Union[str, Any] = self.trie.reached_leaf(self.current_seq )
lowerCamelCase :Optional[int] = completed
return stepped, completed, reset
def snake_case ( self : List[str] ):
lowerCamelCase :str = False
lowerCamelCase :Tuple = []
def snake_case ( self : List[str] ):
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def snake_case ( self : Dict , __snake_case : List[Any]=False ):
lowerCamelCase :str = DisjunctiveConstraint(self.token_ids )
if stateful:
lowerCamelCase :List[Any] = self.seqlen
lowerCamelCase :List[str] = self.current_seq
lowerCamelCase :Optional[Any] = self.completed
return new_constraint
class _lowerCAmelCase :
def __init__( self : List[str] , __snake_case : List[Constraint] ):
lowerCamelCase :str = constraints
# max # of steps required to fulfill a given constraint
lowerCamelCase :Union[str, Any] = max([c.seqlen for c in constraints] )
lowerCamelCase :Union[str, Any] = len(__snake_case )
lowerCamelCase :List[Any] = False
self.init_state()
def snake_case ( self : Optional[int] ):
lowerCamelCase :Union[str, Any] = []
lowerCamelCase :Optional[Any] = None
lowerCamelCase :Tuple = [constraint.copy(stateful=__snake_case ) for constraint in self.constraints]
def snake_case ( self : Optional[int] ):
lowerCamelCase :Optional[Any] = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :str = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
lowerCamelCase :Any = constraint.advance()
if isinstance(__snake_case , __snake_case ):
token_list.append(__snake_case )
elif isinstance(__snake_case , __snake_case ):
token_list.extend(__snake_case )
else:
lowerCamelCase :int = self.inprogress_constraint.advance()
if isinstance(__snake_case , __snake_case ):
token_list.append(__snake_case )
elif isinstance(__snake_case , __snake_case ):
token_list.extend(__snake_case )
if len(__snake_case ) == 0:
return None
else:
return token_list
def snake_case ( self : List[str] , __snake_case : Optional[List[int]] ):
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
lowerCamelCase , lowerCamelCase :Tuple = self.add(__snake_case )
# the entire list of constraints is fulfilled
if self.completed:
break
def snake_case ( self : Dict , __snake_case : int ):
if not isinstance(__snake_case , __snake_case ):
raise ValueError(F"`token_id` should be an `int`, but is `{token_id}`." )
lowerCamelCase , lowerCamelCase :Optional[int] = False, False
if self.completed:
lowerCamelCase :Any = True
lowerCamelCase :Tuple = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
# current job, simply update the state
lowerCamelCase , lowerCamelCase , lowerCamelCase :int = self.inprogress_constraint.update(__snake_case )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=__snake_case ) )
lowerCamelCase :Union[str, Any] = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
lowerCamelCase :Dict = None
if len(self.pending_constraints ) == 0:
# we're done!
lowerCamelCase :int = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of the
# constraints in our list?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(__snake_case ):
lowerCamelCase , lowerCamelCase , lowerCamelCase :Tuple = pending_constraint.update(__snake_case )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(__snake_case )
lowerCamelCase :Tuple = None
if not complete and stepped:
lowerCamelCase :Union[str, Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
lowerCamelCase :Optional[int] = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there are no pending constraints left after this and none in progress either, then we must be
# complete.
lowerCamelCase :Optional[int] = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def snake_case ( self : str , __snake_case : Any=True ):
lowerCamelCase :Union[str, Any] = ConstraintListState(self.constraints ) # we never mutate the self.constraints
# objects throughout this process, so the copy starts from its initialization state.
if stateful:
lowerCamelCase :int = [
constraint.copy(stateful=__snake_case ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
lowerCamelCase :Optional[int] = self.inprogress_constraint.copy(stateful=__snake_case )
lowerCamelCase :Union[str, Any] = [constraint.copy() for constraint in self.pending_constraints]
return new_state
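# A hedged usage sketch of how these classes are consumed (checkpoint and
# phrase are illustrative assumptions): constrained beam search in `generate`
# drives the advance()/does_advance()/update() protocol defined above.
#
#     from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PhrasalConstraint
#
#     tokenizer = AutoTokenizer.from_pretrained("t5-small")
#     model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
#     phrase_ids = tokenizer("Regen", add_special_tokens=False).input_ids
#     inputs = tokenizer("translate English to German: It is raining.", return_tensors="pt")
#     out = model.generate(**inputs, constraints=[PhrasalConstraint(phrase_ids)], num_beams=5)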
| 166
| 0
|
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
__SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy-config.json""")
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple:
A : Optional[Any] = 0
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict:
A : List[str] = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict:
A : Optional[int] = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]:
A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
A : str = AutoConfig.for_model("roberta" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
A : Optional[int] = os.path.join(__lowerCamelCase , "fake-roberta" )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "config.json" ) , "w" ) as f:
f.write(json.dumps({} ) )
A : int = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertEqual(type(__lowerCamelCase ) , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str:
try:
AutoConfig.register("custom" , __lowerCamelCase )
# Wrong model type will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoConfig.register("model" , __lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoConfig.register("bert" , __lowerCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
A : Optional[Any] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase )
A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
with self.assertRaisesRegex(
__lowerCamelCase , "bert-base is not a local folder and is not a valid model identifier" ):
A : str = AutoConfig.from_pretrained("bert-base" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Any:
with self.assertRaisesRegex(
__lowerCamelCase , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
A : Optional[int] = AutoConfig.from_pretrained(__lowerCamelCase , revision="aaaaaa" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
with self.assertRaisesRegex(
__lowerCamelCase , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ):
A : Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowerCamelCase ):
A : int = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCamelCase ):
A : Union[str, Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__lowerCamelCase )
A : Union[str, Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__lowerCamelCase )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase )
A : List[Any] = AutoConfig.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase )
self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]:
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = "new-model"
try:
AutoConfig.register("new-model" , __lowerCamelCase )
# If remote code is not set, the default is to use local
A : Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
A : str = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__lowerCamelCase )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote is enabled, we load from the Hub
A : Any = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__lowerCamelCase )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
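# A minimal sketch of the registration round-trip these tests exercise (the
# `ToyConfig` class is illustrative, not part of the test suite):
class ToyConfig(transformers.PretrainedConfig):
    model_type = "toy-model"

AutoConfig.register("toy-model", ToyConfig)
with tempfile.TemporaryDirectory() as tmp_dir:
    ToyConfig().save_pretrained(tmp_dir)
    # from_pretrained resolves back to ToyConfig via the registered model_type
    assert type(AutoConfig.from_pretrained(tmp_dir)) is ToyConfig
del CONFIG_MAPPING._extra_content["toy-model"]  # clean up, mirroring the tests above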
| 718
|
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Optional[int]=10 , __lowerCamelCase : Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase : Tuple=[1, 1, 2, 1] , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , ) -> str:
A : Optional[Any] = parent
A : Optional[int] = batch_size
A : List[str] = image_size
A : List[str] = num_channels
A : Tuple = embeddings_size
A : Optional[int] = hidden_sizes
A : Dict = depths
A : Optional[int] = is_training
A : List[str] = use_labels
A : List[Any] = hidden_act
A : Optional[int] = num_labels
A : int = scope
A : List[Any] = len(__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]:
A : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : Optional[Any] = None
if self.use_labels:
A : Any = ids_tensor([self.batch_size] , self.num_labels )
A : List[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Tuple:
A : List[str] = TFRegNetModel(config=__lowerCamelCase )
A : str = model(__lowerCamelCase , training=__lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Tuple ) -> List[str]:
A : List[Any] = self.num_labels
A : int = TFRegNetForImageClassification(__lowerCamelCase )
A : str = model(__lowerCamelCase , labels=__lowerCamelCase , training=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
A : Any = self.prepare_config_and_inputs()
A , A , A : str = config_and_inputs
A : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ):
'''simple docstring'''
a__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
a__ = (
{"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
A : Optional[Any] = TFRegNetModelTester(self )
A : int = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple:
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Union[str, Any] = model_class(__lowerCamelCase )
A : int = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Union[str, Any] = [*signature.parameters.keys()]
A : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
def check_hidden_states_output(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ):
A : int = model_class(__lowerCamelCase )
A : int = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) , training=__lowerCamelCase )
A : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A : Dict = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A , A : int = self.model_tester.prepare_config_and_inputs_for_common()
A : str = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A : List[str] = layer_type
A : List[Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A : Union[str, Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
A , A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(__lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]={} ):
A : Optional[int] = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase )
A : int = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ).to_tuple()
def recursive_check(__lowerCamelCase : List[str] , __lowerCamelCase : Any ):
if isinstance(__lowerCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase ):
recursive_check(__lowerCamelCase , __lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(__lowerCamelCase , __lowerCamelCase ) ) , msg=(
"Tuple and dict output are not equal. Difference:"
F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"""
) , )
recursive_check(__lowerCamelCase , __lowerCamelCase )
for model_class in self.all_model_classes:
A : Tuple = model_class(__lowerCamelCase )
A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} )
A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Union[str, Any] = TFRegNetModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def UpperCAmelCase ( ):
A : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]:
A : List[Any] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A : Optional[int] = self.default_image_processor
A : List[Any] = prepare_img()
A : str = image_processor(images=__lowerCamelCase , return_tensors="tf" )
# forward pass
A : List[Any] = model(**__lowerCamelCase , training=__lowerCamelCase )
# verify the logits
A : Dict = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
A : Optional[Any] = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 )
| 17
| 0
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCamelCase ( snake_case_ ):
'''simple docstring'''
__lowercase : Union[str, Any] = '''ClapFeatureExtractor'''
__lowercase : List[str] = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self , __lowercase , __lowercase ):
"""simple docstring"""
super().__init__(__lowercase , __lowercase )
def __call__( self , __lowercase=None , __lowercase=None , __lowercase=None , **__lowercase ):
"""simple docstring"""
__A : Any = kwargs.pop('sampling_rate' , __lowercase )
if text is None and audios is None:
raise ValueError('You have to specify either text or audios. Both cannot be None.' )
if text is not None:
__A : List[str] = self.tokenizer(__lowercase , return_tensors=__lowercase , **__lowercase )
if audios is not None:
__A : Dict = self.feature_extractor(
__lowercase , sampling_rate=__lowercase , return_tensors=__lowercase , **__lowercase )
if text is not None and audios is not None:
__A : Optional[int] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowercase ) , tensor_type=__lowercase )
def snake_case__ ( self , *__lowercase , **__lowercase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowercase , **__lowercase )
def snake_case__ ( self , *__lowercase , **__lowercase ):
"""simple docstring"""
return self.tokenizer.decode(*__lowercase , **__lowercase )
@property
def snake_case__ ( self ):
"""simple docstring"""
__A : Optional[int] = self.tokenizer.model_input_names
__A : int = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
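# A hedged usage sketch (the checkpoint name and the random waveform are
# assumptions): text goes to the tokenizer, audio to the feature extractor,
# and the two outputs are merged into one batch, as implemented above.
#
#     import numpy as np
#     from transformers import ClapProcessor
#
#     processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#     waveform = np.random.randn(48_000)  # ~1 s of mono audio
#     batch = processor(text=["a dog barking"], audios=waveform,
#                       sampling_rate=48_000, return_tensors="pt")
#     # batch now carries input_ids/attention_mask plus input_features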
| 365
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _lowerCamelCase ( snake_case_ ):
'''simple docstring'''
__lowercase : int = '''dandelin/vilt-b32-finetuned-vqa'''
__lowercase : int = (
'''This is a tool that answers a question about an image. It takes an input named `image` which should be the '''
'''image containing the information, as well as a `question` which should be the question in English. It '''
'''returns a text that is the answer to the question.'''
)
__lowercase : Union[str, Any] = '''image_qa'''
__lowercase : Optional[Any] = AutoProcessor
__lowercase : Tuple = AutoModelForVisualQuestionAnswering
__lowercase : Tuple = ['''image''', '''text''']
__lowercase : List[str] = ['''text''']
def __init__( self , *__lowercase , **__lowercase ):
"""simple docstring"""
requires_backends(self , ['vision'] )
super().__init__(*__lowercase , **__lowercase )
def snake_case__ ( self , __lowercase , __lowercase ):
"""simple docstring"""
return self.pre_processor(__lowercase , __lowercase , return_tensors='pt' )
def snake_case__ ( self , __lowercase ):
"""simple docstring"""
with torch.no_grad():
return self.model(**__lowercase ).logits
def snake_case__ ( self , __lowercase ):
"""simple docstring"""
__A : List[str] = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
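# A hedged usage sketch (the tool class name and image path are assumptions
# based on the mangled identifiers above): PipelineTool instances are
# callable, chaining encode -> forward -> decode as defined here.
#
#     from PIL import Image
#
#     tool = ImageQuestionAnsweringTool()
#     answer = tool(Image.open("photo.png"), "How many dogs are in the photo?")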
| 365
| 1
|
'''simple docstring'''
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def lowercase__ ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
__lowercase = botoa.client("""iam""" )
__lowercase = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=__UpperCamelCase , AssumeRolePolicyDocument=json.dumps(__UpperCamelCase , indent=2 ) )
__lowercase = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=__UpperCamelCase , PolicyName=F'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(__UpperCamelCase , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
def lowercase__ ( __UpperCamelCase : int ):
'''simple docstring'''
__lowercase = botoa.client("""iam""" )
return iam_client.get_role(RoleName=__UpperCamelCase )["Role"]["Arn"]
def lowercase__ ( ):
'''simple docstring'''
__lowercase = _ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , __UpperCamelCase , )
__lowercase = None
if credentials_configuration == 0:
__lowercase = _ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
__lowercase = aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
__lowercase = _ask_field("""AWS Access Key ID: """ )
__lowercase = aws_access_key_id
__lowercase = _ask_field("""AWS Secret Access Key: """ )
__lowercase = aws_secret_access_key
__lowercase = _ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
__lowercase = aws_region
__lowercase = _ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , __UpperCamelCase , )
if role_management == 0:
__lowercase = _ask_field("""Enter your IAM role name: """ )
else:
__lowercase = """accelerate_sagemaker_execution_role"""
print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(__UpperCamelCase )
__lowercase = _ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=__UpperCamelCase , error_message="""Please enter yes or no.""" , )
__lowercase = None
if is_custom_docker_image:
__lowercase = _ask_field("""Enter your Docker image: """ , lambda __UpperCamelCase : str(__UpperCamelCase ).lower() )
__lowercase = _ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=__UpperCamelCase , error_message="""Please enter yes or no.""" , )
__lowercase = None
if is_sagemaker_inputs_enabled:
__lowercase = _ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda __UpperCamelCase : str(__UpperCamelCase ).lower() , )
__lowercase = _ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=__UpperCamelCase , error_message="""Please enter yes or no.""" , )
__lowercase = None
if is_sagemaker_metrics_enabled:
__lowercase = _ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda __UpperCamelCase : str(__UpperCamelCase ).lower() , )
__lowercase = _ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
__lowercase = {}
__lowercase = _ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=__UpperCamelCase , error_message="""Please enter yes or no.""" , )
if use_dynamo:
__lowercase = """dynamo_"""
__lowercase = _ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
__lowercase = _ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=__UpperCamelCase , error_message="""Please enter yes or no.""" , )
if use_custom_options:
__lowercase = _ask_options(
"""Which mode do you want to use?""" , __UpperCamelCase , lambda __UpperCamelCase : TORCH_DYNAMO_MODES[int(__UpperCamelCase )] , default="""default""" , )
__lowercase = _ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=__UpperCamelCase , error_message="""Please enter yes or no.""" , )
__lowercase = _ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=__UpperCamelCase , error_message="""Please enter yes or no.""" , )
__lowercase = """Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
__lowercase = _ask_options(
__UpperCamelCase , __UpperCamelCase , lambda __UpperCamelCase : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(__UpperCamelCase )] )
else:
eca_instance_query += " [ml.p3.2xlarge]:"
__lowercase = _ask_field(__UpperCamelCase , lambda __UpperCamelCase : str(__UpperCamelCase ).lower() , default="""ml.p3.2xlarge""" )
__lowercase = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__lowercase = _ask_field(
"""How many machines do you want use? [1]: """ , __UpperCamelCase , default=1 , )
__lowercase = _ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=__UpperCamelCase , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=__UpperCamelCase , use_cpu=__UpperCamelCase , dynamo_config=__UpperCamelCase , eca_instance_type=__UpperCamelCase , profile=__UpperCamelCase , region=__UpperCamelCase , iam_role_name=__UpperCamelCase , mixed_precision=__UpperCamelCase , num_machines=__UpperCamelCase , sagemaker_inputs_file=__UpperCamelCase , sagemaker_metrics_file=__UpperCamelCase , )
| 702
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
snake_case : str = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Union[str, Any] = ['PoolFormerFeatureExtractor']
snake_case : List[Any] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[int] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
snake_case : str = _LazyModule(__name__, globals()['__file__'], _import_structure)
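# A small sketch of what the _LazyModule pattern buys (illustrative): the
# heavy submodule import is deferred until an attribute is first touched, so
# importing the package stays cheap even when torch/vision extras are absent.
#
#     from transformers.models import poolformer
#     config = poolformer.PoolFormerConfig()  # the real import happens here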
| 339
| 0
|
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def __UpperCamelCase( _A : str , _A : float | Decimal , _A : float = 10**-10 ):
'''simple docstring'''
UpperCAmelCase__ : int = a
while True:
UpperCAmelCase__ : Optional[int] = Decimal(_A ) - (
Decimal(eval(_A ) ) / Decimal(eval(str(diff(_A ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(_A ) ) < precision: # noqa: S307
return float(_A )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
print(f"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}""")
# Find the root of log(x) - 1 = 0 (i.e. Euler's number e)
print(f"""The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}""")
# Exponential Roots
print(f"""The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}""")
| 614
|
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
UpperCamelCase__ : Optional[Any] = logging.getLogger(__name__)
UpperCamelCase__ : Dict = 50 # max width of layer names
UpperCamelCase__ : Any = 70 # max width of quantizer names
def __UpperCamelCase( _A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = parser.add_argument_group('''quant_trainer arguments''' )
group.add_argument('''--wprec''' , type=_A , default=8 , help='''weight precision''' )
group.add_argument('''--aprec''' , type=_A , default=8 , help='''activation precision''' )
group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' )
group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' )
group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' )
group.add_argument('''--quant-disable-keyword''' , type=_A , nargs='''+''' , help='''disable quantizers by keyword''' )
group.add_argument('''--quant-disable-layer-module''' , type=_A , help='''disable quantizers by keyword under layer.''' )
group.add_argument('''--quant-enable-layer-module''' , type=_A , help='''enable quantizers by keyword under layer''' )
group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' )
group.add_argument('''--percentile''' , default=_A , type=_A , help='''percentile for PercentileCalibrator''' )
group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' )
group.add_argument('''--clip-gelu''' , metavar='''N''' , type=_A , help='''clip gelu output maximum value to N''' )
group.add_argument(
'''--recalibrate-weights''' , action='''store_true''' , help=(
'''recalibrate weight amaxes by taking the max of the weights.'''
''' amaxes will be computed with the current quantization granularity (axis).'''
) , )
def __UpperCamelCase( _A : Tuple ):
'''simple docstring'''
if args.calibrator == "max":
UpperCAmelCase__ : str = '''max'''
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('''Specify --percentile when using percentile calibrator''' )
UpperCAmelCase__ : Dict = '''histogram'''
elif args.calibrator == "mse":
UpperCAmelCase__ : Any = '''histogram'''
else:
raise ValueError(F'''Invalid calibrator {args.calibrator}''' )
UpperCAmelCase__ : Dict = QuantDescriptor(num_bits=args.aprec , calib_method=_A )
UpperCAmelCase__ : str = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(_A )
quant_nn.QuantLinear.set_default_quant_desc_weight(_A )
def __UpperCamelCase( _A : Any , _A : Any , _A : Any=False , _A : Optional[Any]=False ):
'''simple docstring'''
logger.info('''Configuring Model for Quantization''' )
logger.info(F'''using quantization package {pytorch_quantization.__file__}''' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(_A , ['''embeddings'''] , which='''weight''' , _disabled=_A )
if args.quant_disable:
set_quantizer_by_name(_A , [''''''] , _disabled=_A )
if args.quant_disable_keyword:
set_quantizer_by_name(_A , args.quant_disable_keyword , _disabled=_A )
if args.quant_disable_layer_module:
set_quantizer_by_name(_A , [R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=_A )
if args.quant_enable_layer_module:
set_quantizer_by_name(_A , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=_A )
if args.recalibrate_weights:
recalibrate_weights(_A )
if args.fuse_qkv:
fuse_qkv(_A , _A )
if args.clip_gelu:
clip_gelu(_A , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(_A )
def __UpperCamelCase( _A : str ):
'''simple docstring'''
logger.info('''Enabling Calibration''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F'''{name:80}: {module}''' )
def __UpperCamelCase( _A : Tuple , _A : Any ):
'''simple docstring'''
logger.info('''Loading calibrated amax''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('''percentile''' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(_A )
def __UpperCamelCase( _A : Dict , _A : Optional[int] ):
'''simple docstring'''
def fusea(_A : Optional[Any] , _A : Optional[Any] , _A : Dict ):
for mod in [qq, qk, qv]:
if not hasattr(_A , '''_amax''' ):
print(''' WARNING: NO AMAX BUFFER''' )
return
UpperCAmelCase__ : Dict = qq._amax.detach().item()
UpperCAmelCase__ : List[Any] = qk._amax.detach().item()
UpperCAmelCase__ : Optional[int] = qv._amax.detach().item()
UpperCAmelCase__ : Dict = max(_A , _A , _A )
qq._amax.fill_(_A )
qk._amax.fill_(_A )
qv._amax.fill_(_A )
logger.info(F''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
for name, mod in model.named_modules():
if name.endswith('''.attention.self''' ):
logger.info(F'''FUSE_QKV: {name:{name_width}}''' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def __UpperCamelCase( _A : Dict , _A : Any ):
'''simple docstring'''
for name, mod in model.named_modules():
if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ):
UpperCAmelCase__ : Union[str, Any] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=_A )
UpperCAmelCase__ : Tuple = mod._input_quantizer._amax.data.detach().item()
logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def __UpperCamelCase( _A : str ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(_A , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None:
UpperCAmelCase__ : int = mod.weight.shape[0]
UpperCAmelCase__ : Tuple = mod._weight_quantizer._amax.detach()
UpperCAmelCase__ : Optional[int] = torch.ones(_A , dtype=amax.dtype , device=amax.device ) * amax
print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def __UpperCamelCase( _A : List[str] ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(_A , '''_weight_quantizer''' ):
if not hasattr(mod.weight_quantizer , '''_amax''' ):
print(F'''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
UpperCAmelCase__ : Any = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
UpperCAmelCase__ : Optional[Any] = set(range(len(mod.weight.size() ) ) ) - axis_set
UpperCAmelCase__ : int = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_A , keepdims=_A ).detach()
logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
UpperCAmelCase__ : str = amax
def __UpperCamelCase( _A : Dict , _A : Tuple=25 , _A : Any=1_80 , _A : Optional[int]=None ):
'''simple docstring'''
if ignore is None:
UpperCAmelCase__ : Dict = []
elif not isinstance(_A , _A ):
UpperCAmelCase__ : int = [ignore]
UpperCAmelCase__ : Optional[int] = 0
for name, mod in model.named_modules():
if not hasattr(_A , '''weight''' ):
continue
UpperCAmelCase__ : Dict = max(_A , len(_A ) )
for name, mod in model.named_modules():
UpperCAmelCase__ : str = getattr(_A , '''_input_quantizer''' , _A )
UpperCAmelCase__ : int = getattr(_A , '''_weight_quantizer''' , _A )
if not hasattr(_A , '''weight''' ):
continue
if type(_A ) in ignore:
continue
if [True for s in ignore if type(_A ) is str and s in name]:
continue
UpperCAmelCase__ : Dict = F'''Act:{input_q.extra_repr()}'''
UpperCAmelCase__ : int = F'''Wgt:{weight_q.extra_repr()}'''
UpperCAmelCase__ : Dict = F'''{name:{name_width}} {act_str} {wgt_str}'''
if len(_A ) <= line_width:
logger.info(_A )
else:
logger.info(F'''{name:{name_width}} {act_str}''' )
logger.info(F'''{' ':{name_width}} {wgt_str}''' )
def __UpperCamelCase( _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = 0
for name, mod in model.named_modules():
if isinstance(_A , pytorch_quantization.nn.TensorQuantizer ):
print(F'''{name:80} {mod}''' )
count += 1
print(F'''{count} TensorQuantizers found in model''' )
def set_quantizer( name , mod , quantizer , k , v ):
'''simple docstring'''
quantizer_mod = getattr(mod , quantizer , None )
if quantizer_mod is not None:
assert hasattr(quantizer_mod , k )
setattr(quantizer_mod , k , v )
else:
logger.warning(F'''{name} has no {quantizer}''' )
def set_quantizers( name , mod , which="both" , **kwargs ):
'''simple docstring'''
s = F'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += F''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(name , mod , '''_input_quantizer''' , k , v )
if which in ["weight", "both"]:
set_quantizer(name , mod , '''_weight_quantizer''' , k , v )
logger.info(s )
def set_quantizer_by_name( model , names , **kwargs ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(mod , '''_input_quantizer''' ) or hasattr(mod , '''_weight_quantizer''' ):
for n in names:
if re.search(n , name ):
set_quantizers(name , mod , **kwargs )
elif name.endswith('''_quantizer''' ):
for n in names:
if re.search(n , name ):
s = F'''Warning: changing {name:{name_width}}'''
for k, v in kwargs.items():
s += F''' {k}={v}'''
setattr(mod , k , v )
logger.info(s )
| 614
| 1
|
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
url = f"https://www.google.com/search?q={query}&num=100"
res = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
link = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
link = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)["url"][0]
webbrowser.open(link)
| 713
|
from math import factorial, radians
def _a ( angle_in_degrees : float , accuracy : int = 18 , rounded_values_count : int = 10 ):
"""simple docstring"""
angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
angle_in_radians = radians(angle_in_degrees )
result = angle_in_radians
a = 3
b = -1
for _ in range(accuracy ):
result += (b * (angle_in_radians**a)) / factorial(a )
b = -b # Signs alternate: one positive term, the next negative, and so on...
a += 2 # Increased by 2 for every term.
return round(result , rounded_values_count )
if __name__ == "__main__":
__import__("doctest").testmod()
| 106
| 0
|
'''simple docstring'''
from __future__ import annotations
__SCREAMING_SNAKE_CASE : Optional[Any] =[
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search ( grid : list[list[int]] , init : list[int] , goal : list[int] , cost : int , heuristic : list[list[int]] , ):
'''simple docstring'''
closed = [
[0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
] # the reference grid
closed[init[0]][init[1]] = 1
action = [
[0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
] # the action grid
x = init[0]
y = init[1]
g = 0
f = g + heuristic[x][y] # cost from starting cell to destination cell
cell = [[f, g, x, y]]
found = False # flag that is set when search is complete
resign = False # flag set if we can't find expand
while not found and not resign:
if len(cell ) == 0:
raise ValueError("""Algorithm is unable to find a solution""" )
else: # choose the least costly action so as to move closer to the goal
cell.sort()
cell.reverse()
next_cell = cell.pop()
x = next_cell[2]
y = next_cell[3]
g = next_cell[1]
if x == goal[0] and y == goal[1]:
found = True
else:
for i in range(len(DIRECTIONS ) ): # to try out different valid actions
xa = x + DIRECTIONS[i][0]
ya = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
ga = g + cost
fa = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
closed[xa][ya] = 1
action[xa][ya] = i
invpath = []
x = goal[0]
y = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
xa = x - DIRECTIONS[action[x][y]][0]
ya = y - DIRECTIONS[action[x][y]][1]
x = xa
y = ya
invpath.append([x, y] )
path = []
for i in range(len(invpath ) ):
path.append(invpath[len(invpath ) - 1 - i] )
return path, action
if __name__ == "__main__":
grid = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
init = [0, 0]
# all coordinates are given in format [y,x]
goal = [len(grid) - 1, len(grid[0]) - 1]
cost = 1
# the cost map which pushes the path closer to the goal
heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
heuristic[i][j] = 99
path, action = search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
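# The heuristic above is plain Manhattan distance to the goal, with obstacle
# cells penalised to 99 so the search routes around them; for this grid the
# start cell scores abs(0 - 4) + abs(0 - 5) = 9.
print(heuristic[0][0]) # 9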
| 135
|
'''simple docstring'''
import math
def is_prime ( number : int ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution ( ratio : float = 0.1 ) -> int:
'''simple docstring'''
j = 3
primes = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(i )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
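# A quick check of the 6k +/- 1 trial division: the primes below 30.
print([n for n in range(2, 30) if is_prime(n)]) # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]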
| 135
| 1
|
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase ( snake_case ):
lowerCAmelCase__ = (DPMSolverSinglestepScheduler,)
lowerCAmelCase__ = (("""num_inference_steps""", 25),)
def __A ( self , **a__ ):
_UpperCAmelCase = {
'num_train_timesteps': 10_00,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'prediction_type': 'epsilon',
'thresholding': False,
'sample_max_value': 1.0,
'algorithm_type': 'dpmsolver++',
'solver_type': 'midpoint',
'lambda_min_clipped': -float('inf' ),
'variance_type': None,
}
config.update(**a__ )
return config
def __A ( self , a__=0 , **a__ ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop('num_inference_steps' , a__ )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**a__ )
_UpperCAmelCase = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__ )
_UpperCAmelCase = scheduler_class.from_pretrained(a__ )
new_scheduler.set_timesteps(a__ )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase , _UpperCAmelCase = sample, sample
for t in range(a__ , time_step + scheduler.config.solver_order + 1 ):
_UpperCAmelCase = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
_UpperCAmelCase = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __A ( self ):
pass
def __A ( self , a__=0 , **a__ ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop('num_inference_steps' , a__ )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__ )
_UpperCAmelCase = scheduler_class.from_pretrained(a__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a__ )
# copy over dummy past residual (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
_UpperCAmelCase = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __A ( self , a__=None , **a__ ):
if scheduler is None:
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**a__ )
_UpperCAmelCase = scheduler_class(**a__ )
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**a__ )
_UpperCAmelCase = scheduler_class(**a__ )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(a__ )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(a__ , a__ )
_UpperCAmelCase = scheduler.step(a__ , a__ , a__ ).prev_sample
return sample
def __A ( self ):
_UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_UpperCAmelCase = 50
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(a__ )
# make sure that the first t is odd
for i, t in enumerate(scheduler.timesteps[3:] ):
_UpperCAmelCase = model(a__ , a__ )
_UpperCAmelCase = scheduler.step(a__ , a__ , a__ ).prev_sample
_UpperCAmelCase = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_574 ) < 1E-3
def __A ( self ):
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=a__ )
def __A ( self ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_UpperCAmelCase = self.full_loop(scheduler=a__ )
_UpperCAmelCase = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_791 ) < 1E-3
_UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_UpperCAmelCase = self.full_loop(scheduler=a__ )
_UpperCAmelCase = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_791 ) < 1E-3
def __A ( self ):
self.check_over_configs(thresholding=a__ )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=a__ , prediction_type=a__ , sample_max_value=a__ , algorithm_type='dpmsolver++' , solver_order=a__ , solver_type=a__ , )
def __A ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a__ )
def __A ( self ):
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=a__ , solver_type=a__ , prediction_type=a__ , algorithm_type=a__ , )
_UpperCAmelCase = self.full_loop(
solver_order=a__ , solver_type=a__ , prediction_type=a__ , algorithm_type=a__ , )
assert not torch.isnan(a__ ).any(), "Samples have nan numbers"
def __A ( self ):
self.check_over_configs(lower_order_final=a__ )
self.check_over_configs(lower_order_final=a__ )
def __A ( self ):
self.check_over_configs(lambda_min_clipped=-float('inf' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def __A ( self ):
self.check_over_configs(variance_type=a__ )
self.check_over_configs(variance_type='learned_range' )
def __A ( self ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=a__ , time_step=0 )
def __A ( self ):
_UpperCAmelCase = self.full_loop()
_UpperCAmelCase = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_791 ) < 1E-3
def __A ( self ):
_UpperCAmelCase = self.full_loop(use_karras_sigmas=a__ )
_UpperCAmelCase = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_248 ) < 1E-3
def __A ( self ):
_UpperCAmelCase = self.full_loop(prediction_type='v_prediction' )
_UpperCAmelCase = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.1_453 ) < 1E-3
def __A ( self ):
_UpperCAmelCase = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=a__ )
_UpperCAmelCase = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.0_649 ) < 1E-3
def __A ( self ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(thresholding=a__ , dynamic_thresholding_ratio=0 )
_UpperCAmelCase = scheduler_class(**a__ )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(a__ )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(a__ , a__ )
_UpperCAmelCase = scheduler.step(a__ , a__ , a__ ).prev_sample
assert sample.dtype == torch.float16
| 494
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class lowerCAmelCase ( snake_case ):
lowerCAmelCase__ = """deberta-v2"""
def __init__( self , a__=12_81_00 , a__=15_36 , a__=24 , a__=24 , a__=61_44 , a__="gelu" , a__=0.1 , a__=0.1 , a__=5_12 , a__=0 , a__=0.02 , a__=1E-7 , a__=False , a__=-1 , a__=0 , a__=True , a__=None , a__=0 , a__="gelu" , **a__ , ):
super().__init__(**a__ )
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = relative_attention
_UpperCAmelCase = max_relative_positions
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = position_biased_input
# Backwards compatibility
if type(a__ ) == str:
_UpperCAmelCase = [x.strip() for x in pos_att_type.lower().split('|' )]
_UpperCAmelCase = pos_att_type
_UpperCAmelCase = vocab_size
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = kwargs.get('pooler_hidden_size' , a__ )
_UpperCAmelCase = pooler_dropout
_UpperCAmelCase = pooler_hidden_act
class lowerCAmelCase ( snake_case ):
@property
def __A ( self ):
if self.task == "multiple-choice":
_UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCAmelCase = {0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def __A ( self ):
return 12
def __A ( self , a__ , a__ = -1 , a__ = -1 , a__ = -1 , a__ = False , a__ = None , a__ = 3 , a__ = 40 , a__ = 40 , a__ = None , ):
_UpperCAmelCase = super().generate_dummy_inputs(preprocessor=a__ , framework=a__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 494
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'RUCAIBox/mvp': 1024,
}
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase =VOCAB_FILES_NAMES
UpperCAmelCase =PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase =["input_ids", "attention_mask"]
UpperCAmelCase =MvpTokenizer
def __init__( self , snake_case=None , snake_case=None , snake_case=None , snake_case="replace" , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case="<mask>" , snake_case=False , snake_case=True , **snake_case , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
snake_case , snake_case , tokenizer_file=snake_case , errors=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , **snake_case , )
_UpperCAmelCase : Tuple =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , snake_case) != add_prefix_space:
_UpperCAmelCase : List[str] =getattr(snake_case , pre_tok_state.pop('type'))
_UpperCAmelCase : List[str] =add_prefix_space
_UpperCAmelCase : Tuple =pre_tok_class(**snake_case)
_UpperCAmelCase : Optional[int] =add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_UpperCAmelCase : int ='post_processor'
_UpperCAmelCase : List[str] =getattr(self.backend_tokenizer , snake_case , snake_case)
if tokenizer_component_instance:
_UpperCAmelCase : Optional[int] =json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_UpperCAmelCase : Optional[Any] =tuple(state['sep'])
if "cls" in state:
_UpperCAmelCase : int =tuple(state['cls'])
_UpperCAmelCase : Tuple =False
if state.get('add_prefix_space' , snake_case) != add_prefix_space:
_UpperCAmelCase : Optional[Any] =add_prefix_space
_UpperCAmelCase : str =True
if state.get('trim_offsets' , snake_case) != trim_offsets:
_UpperCAmelCase : List[Any] =trim_offsets
_UpperCAmelCase : Any =True
if changes_to_apply:
_UpperCAmelCase : Tuple =getattr(snake_case , state.pop('type'))
_UpperCAmelCase : List[Any] =component_class(**snake_case)
setattr(self.backend_tokenizer , snake_case , snake_case)
@property
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def lowerCAmelCase ( self , snake_case) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[str] =AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case) if isinstance(snake_case , snake_case) else value
_UpperCAmelCase : List[Any] =value
def lowerCAmelCase ( self , *snake_case , **snake_case) -> BatchEncoding:
'''simple docstring'''
_UpperCAmelCase : List[Any] =kwargs.get('is_split_into_words' , snake_case)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*snake_case , **snake_case)
def lowerCAmelCase ( self , *snake_case , **snake_case) -> BatchEncoding:
'''simple docstring'''
_UpperCAmelCase : Optional[int] =kwargs.get('is_split_into_words' , snake_case)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'to use it with pretokenized inputs.')
return super()._encode_plus(*snake_case , **snake_case)
def lowerCAmelCase ( self , snake_case , snake_case = None) -> Tuple[str]:
'''simple docstring'''
_UpperCAmelCase : str =self._tokenizer.model.save(snake_case , name=snake_case)
return tuple(snake_case)
def lowerCAmelCase ( self , snake_case , snake_case=None) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] =[self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase ( self , snake_case , snake_case = None) -> List[int]:
'''simple docstring'''
_UpperCAmelCase : List[str] =[self.sep_token_id]
_UpperCAmelCase : List[str] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
| 446
|
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines ( lines : List[str] ) -> str:
'''simple docstring'''
filtered_lines = []
for line in lines:
line = re.sub(R'#.*' , '' , line ) # remove comments
if line:
filtered_lines.append(line )
full_str = '\n'.join(filtered_lines )
# Make a hash from all this code
full_bytes = full_str.encode('utf-8' )
return sha256(full_bytes ).hexdigest()
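# Because '#...' comments are stripped before hashing, purely cosmetic edits do
# not change a module's cache hash:
print(
_hash_python_lines(['x = 1  # a comment', 'y = x + 1'])
== _hash_python_lines(['x = 1  # another comment', 'y = x + 1'])
) # True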
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 446
| 1
|
import numpy as np
import datasets
A__ = """
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""
A__ = """\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
"""
A__ = """
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def _info ( self : Optional[int] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ),
} ) , )
def _compute ( self : List[Any] , X : Union[str, Any] , reference_distribution : Union[str, Any] ):
# convert to numpy arrays
X = np.array(X )
reference_distribution = np.array(reference_distribution )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('''Expected `X` to be a 2D vector''' )
if len(reference_distribution.shape ) != 2:
raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
# Get Mahalanobis distance for each prediction
X_minus_mu = X - np.mean(reference_distribution )
cov = np.cov(reference_distribution.T )
try:
inv_covmat = np.linalg.inv(cov )
except np.linalg.LinAlgError:
inv_covmat = np.linalg.pinv(cov )
left_term = np.dot(X_minus_mu , inv_covmat )
mahal_dist = np.dot(left_term , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 49
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class Image:
@staticmethod
def open( *args : str , **kwargs : str ):
pass
@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Optional[int] = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
# The scores are so close that floating-point error dominates, so the ordering is not guaranteed
# across Python and torch versions.
self.assertIn(
nested_simplify(__snake_case ) , [
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}],
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}],
] , )
lowerCamelCase :Tuple = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@require_tf
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] , )
lowerCamelCase :int = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@slow
@require_torch
def snake_case ( self : Any ):
lowerCamelCase :str = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
| 49
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class lowerCamelCase_ ( lowercase ):
__lowercase : Optional[Any] = ["pixel_values"]
def __init__( self , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = PILImageResampling.BILINEAR , lowerCamelCase_ = True , lowerCamelCase_ = 1 / 2_55 , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> None:
"""simple docstring"""
super().__init__(**lowerCamelCase_ )
_UpperCamelCase = size if size is not None else {"shortest_edge": 3_84}
_UpperCamelCase = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
_UpperCamelCase = do_resize
_UpperCamelCase = size
# Default value set here for backwards compatibility where the value in config is None
_UpperCamelCase = crop_pct if crop_pct is not None else 2_24 / 2_56
_UpperCamelCase = resample
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = PILImageResampling.BICUBIC , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> np.ndarray:
"""simple docstring"""
_UpperCamelCase = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
if "shortest_edge" not in size:
raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
_UpperCamelCase = size["shortest_edge"]
if shortest_edge < 3_84:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
_UpperCamelCase = int(shortest_edge / crop_pct )
_UpperCamelCase = get_resize_output_image_size(lowerCamelCase_ , size=lowerCamelCase_ , default_to_square=lowerCamelCase_ )
_UpperCamelCase = resize(image=lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowerCamelCase_ , size=(shortest_edge, shortest_edge) , data_format=lowerCamelCase_ , **lowerCamelCase_ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowerCamelCase_ , size=(shortest_edge, shortest_edge) , resample=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> int:
"""simple docstring"""
return rescale(lowerCamelCase_ , scale=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = ChannelDimension.FIRST , **lowerCamelCase_ , ) -> PIL.Image.Image:
"""simple docstring"""
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = crop_pct if crop_pct is not None else self.crop_pct
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
_UpperCamelCase = make_list_of_images(lowerCamelCase_ )
if not valid_images(lowerCamelCase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 3_84 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(lowerCamelCase_ ) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=lowerCamelCase_ , size=lowerCamelCase_ , crop_pct=lowerCamelCase_ , resample=lowerCamelCase_ ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=lowerCamelCase_ , scale=lowerCamelCase_ ) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(lowerCamelCase_ , lowerCamelCase_ ) for image in images]
_UpperCamelCase = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_ )
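# The crop_pct arithmetic inside resize is easiest to see with the defaults: for
# a requested shortest edge of 224 and crop_pct = 224 / 256, the short side is
# first resized to int(224 / (224 / 256)) = 256 and then center-cropped back to 224.
# A one-line check of that arithmetic:
print(int(224 / (224 / 256))) # 256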
| 147
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 147
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_swiftformer"] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 363
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
__snake_case : Union[str, Any] = """time_series_transformer"""
__snake_case : Any = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self :List[Any] , __lowercase :Optional[int] = None , __lowercase :Optional[int] = None , __lowercase :str = "student_t" , __lowercase :str = "nll" , __lowercase :int = 1 , __lowercase :List[int] = [1, 2, 3, 4, 5, 6, 7] , __lowercase :Optional[Union[str, bool]] = "mean" , __lowercase :int = 0 , __lowercase :int = 0 , __lowercase :int = 0 , __lowercase :int = 0 , __lowercase :Optional[List[int]] = None , __lowercase :Optional[List[int]] = None , __lowercase :int = 32 , __lowercase :int = 32 , __lowercase :int = 2 , __lowercase :int = 2 , __lowercase :int = 2 , __lowercase :int = 2 , __lowercase :bool = True , __lowercase :str = "gelu" , __lowercase :int = 64 , __lowercase :float = 0.1 , __lowercase :float = 0.1 , __lowercase :float = 0.1 , __lowercase :float = 0.1 , __lowercase :float = 0.1 , __lowercase :int = 100 , __lowercase :float = 0.02 , __lowercase :int=True , **__lowercase :Optional[Any] , ):
# time series specific configuration
__lowerCamelCase : Tuple =prediction_length
__lowerCamelCase : List[Any] =context_length or prediction_length
__lowerCamelCase : Dict =distribution_output
__lowerCamelCase : str =loss
__lowerCamelCase : Tuple =input_size
__lowerCamelCase : int =num_time_features
__lowerCamelCase : int =lags_sequence
__lowerCamelCase : Optional[int] =scaling
__lowerCamelCase : str =num_dynamic_real_features
__lowerCamelCase : Optional[Any] =num_static_real_features
__lowerCamelCase : List[Any] =num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__lowercase ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
__lowerCamelCase : Optional[int] =cardinality
else:
__lowerCamelCase : str =[0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__lowercase ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
__lowerCamelCase : int =embedding_dimension
else:
__lowerCamelCase : Optional[int] =[min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__lowerCamelCase : List[str] =num_parallel_samples
# Transformer architecture configuration
__lowerCamelCase : str =input_size * len(__lowercase ) + self._number_of_features
__lowerCamelCase : Union[str, Any] =d_model
__lowerCamelCase : int =encoder_attention_heads
__lowerCamelCase : int =decoder_attention_heads
__lowerCamelCase : Dict =encoder_ffn_dim
__lowerCamelCase : int =decoder_ffn_dim
__lowerCamelCase : List[Any] =encoder_layers
__lowerCamelCase : int =decoder_layers
__lowerCamelCase : Union[str, Any] =dropout
__lowerCamelCase : Optional[Any] =attention_dropout
__lowerCamelCase : List[str] =activation_dropout
__lowerCamelCase : List[str] =encoder_layerdrop
__lowerCamelCase : int =decoder_layerdrop
__lowerCamelCase : Tuple =activation_function
__lowerCamelCase : str =init_std
__lowerCamelCase : Dict =use_cache
super().__init__(is_encoder_decoder=__lowercase , **__lowercase )
@property
def __lowercase ( self :int ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 363
| 1
|
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum ( arr : Sequence[float] , allow_empty_subarrays : bool = False ) -> float:
'''simple docstring'''
if not arr:
return 0
max_sum = 0 if allow_empty_subarrays else float('-inf' )
curr_sum = 0.0
for num in arr:
curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
max_sum = max(max_sum , curr_sum )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'''{max_subarray_sum(nums) = }''')
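# For the demo array the best slice is [4, -1, 2, 1], hence the printed 6; the
# allow_empty_subarrays flag only changes the answer for all-negative inputs:
print(max_subarray_sum([-3, -2, -1])) # -1
print(max_subarray_sum([-3, -2, -1], allow_empty_subarrays=True)) # 0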
| 7
|
"""simple docstring"""
import numpy as np
def A__ ( vector ):
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
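# The logistic function maps 0 to 0.5 and is applied elementwise over arrays:
print(A__(np.array([-1.0, 0.0, 1.0]))) # ~[0.269, 0.5, 0.731]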
| 589
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def snake_case ( self ):
model = TFXLMRobertaModel.from_pretrained('jplu/tf-xlm-roberta-base' )
features = {
'input_ids': tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] ,dtype=tf.int32 ), # "My dog is cute"
'attention_mask': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] ,dtype=tf.int32 ),
}
output = model(features )['last_hidden_state']
expected_shape = tf.TensorShape((1, 6, 768) )
self.assertEqual(output.shape ,expected_shape )
# compare the actual values for a slice.
expected_slice = tf.convert_to_tensor(
[
[
[0.0681762, 0.10894451, 0.06772504],
[-0.06423668, 0.02366615, 0.04329344],
[-0.06057295, 0.09974135, -0.00070584],
]
] ,dtype=tf.float32 ,)
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1E-4 ) )
| 708
|
def solution ( limit : int = 10_00 ) -> int:
"""simple docstring"""
return sum(e for e in range(3 , limit ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 685
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'yjernite/retribert-base-uncased': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class snake_case__ ( UpperCamelCase_ ):
_lowerCAmelCase =VOCAB_FILES_NAMES
_lowerCAmelCase =PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase =PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase =RetriBertTokenizer
_lowerCAmelCase =['input_ids', 'attention_mask']
def __init__( self : int , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Union[str, Any]="[UNK]" , _lowerCamelCase : str="[SEP]" , _lowerCamelCase : Optional[int]="[PAD]" , _lowerCamelCase : Dict="[CLS]" , _lowerCamelCase : Union[str, Any]="[MASK]" , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Any=None , **_lowerCamelCase : List[Any] , ):
super().__init__(
_lowerCamelCase , tokenizer_file=_lowerCamelCase , do_lower_case=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , pad_token=_lowerCamelCase , cls_token=_lowerCamelCase , mask_token=_lowerCamelCase , tokenize_chinese_chars=_lowerCamelCase , strip_accents=_lowerCamelCase , **_lowerCamelCase , )
snake_case__ : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _lowerCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _lowerCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _lowerCamelCase ) != tokenize_chinese_chars
):
snake_case__ : Dict = getattr(_lowerCamelCase , normalizer_state.pop('type' ) )
snake_case__ : int = do_lower_case
snake_case__ : List[Any] = strip_accents
snake_case__ : str = tokenize_chinese_chars
snake_case__ : int = normalizer_class(**_lowerCamelCase )
snake_case__ : Any = do_lower_case
def UpperCAmelCase__ ( self : Optional[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : Any=None ):
snake_case__ : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase__ ( self : Tuple , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
snake_case__ : Any = [self.sep_token_id]
snake_case__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self : Any , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
snake_case__ : List[Any] = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase )
return tuple(_lowerCamelCase )
| 170
|
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key( orig_key ):
if "model" in orig_key:
orig_key = orig_key.replace('model.' , '' )
if "norm1" in orig_key:
orig_key = orig_key.replace('norm1' , 'attention.output.LayerNorm' )
if "norm2" in orig_key:
orig_key = orig_key.replace('norm2' , 'output.LayerNorm' )
if "norm" in orig_key:
orig_key = orig_key.replace('norm' , 'LayerNorm' )
if "transformer" in orig_key:
layer_num = orig_key.split('.' )[0].split('_' )[-1]
orig_key = orig_key.replace(f'''transformer_{layer_num}''' , f'''encoder.layer.{layer_num}''' )
if "mha.attn" in orig_key:
orig_key = orig_key.replace('mha.attn' , 'attention.self' )
if "mha" in orig_key:
orig_key = orig_key.replace('mha' , 'attention' )
if "W_q" in orig_key:
orig_key = orig_key.replace('W_q' , 'self.query' )
if "W_k" in orig_key:
orig_key = orig_key.replace('W_k' , 'self.key' )
if "W_v" in orig_key:
orig_key = orig_key.replace('W_v' , 'self.value' )
if "ff1" in orig_key:
orig_key = orig_key.replace('ff1' , 'intermediate.dense' )
if "ff2" in orig_key:
orig_key = orig_key.replace('ff2' , 'output.dense' )
if "ff" in orig_key:
orig_key = orig_key.replace('ff' , 'output.dense' )
if "mlm_class" in orig_key:
orig_key = orig_key.replace('mlm.mlm_class' , 'cls.predictions.decoder' )
if "mlm" in orig_key:
orig_key = orig_key.replace('mlm' , 'cls.predictions.transform' )
if "cls" not in orig_key:
orig_key = 'yoso.' + orig_key
return orig_key
def convert_checkpoint_helper( max_position_embeddings , orig_state_dict ):
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
orig_state_dict[rename_key(key )] = val
orig_state_dict['cls.predictions.bias'] = orig_state_dict['cls.predictions.decoder.bias']
orig_state_dict['yoso.embeddings.position_ids'] = torch.arange(max_position_embeddings ).expand((1, -1) ) + 2
return orig_state_dict
def convert_yoso_checkpoint( checkpoint_path , yoso_config_file , pytorch_dump_path ):
orig_state_dict = torch.load(checkpoint_path , map_location='cpu' )['model_state_dict']
config = YosoConfig.from_json_file(yoso_config_file )
model = YosoForMaskedLM(config )
new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict )
print(model.load_state_dict(new_state_dict ) )
model.eval()
model.save_pretrained(pytorch_dump_path )
print(f'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 170
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
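# The _LazyModule above defers the actual submodule imports until first attribute
# access. A stripped-down sketch of the same idea using PEP 562 module-level
# __getattr__ (an illustration only, not the transformers implementation):
#
#     import importlib
#
#     _lazy_symbols = {"loads": "json", "dumps": "json"}  # symbol -> provider module
#
#     def __getattr__(name):
#         if name in _lazy_symbols:
#             return getattr(importlib.import_module(_lazy_symbols[name]), name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")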
| 711
|
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
def get_dataloaders(accelerator, batch_size=16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
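# Minimal usage sketch (assumes the MRPC sample CSVs referenced by the defaults
# above exist on disk; illustration only):
#
#     from accelerate import Accelerator
#
#     accelerator = Accelerator()
#     train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size=16)
#     batch = next(iter(train_dataloader))  # dict with input_ids / attention_mask / labels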
| 527
| 0
|
'''simple docstring'''
def factorial(num: int) -> int:
    """Return num!."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split the number into digits and add them."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits in num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
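
# Quick illustrative sanity check: 10! = 3_628_800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.
assert solution(10) == 27
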
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 446
|
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """
    Image classification pipeline using any `AutoModelForImageClassification`.
    This pipeline predicts the class of an image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 446
| 1
|
"""simple docstring"""
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """
    Print a maximum-size set of activities that a single person can perform,
    assuming the finish times are sorted in ascending order.
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
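# Trace for the demo inputs above: the finish times are already sorted ascending, so
# the greedy scan keeps activities 0, 1, 3 and 4 and the script prints "0,1,3,4,".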
| 718
|
"""simple docstring"""
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return a list of all the primes below max_number, using a sieve."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count the composite integers below max_number that have exactly two prime factors."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
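# Small illustrative check: below 30 the products of exactly two primes are
# 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26, so solution(30) == 10.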
| 176
| 0
|
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding slot
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
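# Illustrative results: check_anagrams("Silent", "Listen") is True (same letters,
# ignoring case), while check_anagrams("There", "Their") is False.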
| 2
|
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding slot
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 360
| 0
|
def binary_xor(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
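# Example: binary_xor(25, 32) -> "0b111001", since 25 = 0b011001 and 32 = 0b100000
# after padding to the same width, and 011001 XOR 100000 = 111001.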
| 37
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 37
| 1
|
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
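# Numeric sketch of the merge rule applied above, W <- W + alpha * (up @ down),
# with made-up shapes (out=4, rank=2, in=3); not tied to any real checkpoint:
#
#     import torch
#
#     weight = torch.zeros(4, 3)
#     up, down = torch.randn(4, 2), torch.randn(2, 3)
#     weight += 0.75 * torch.mm(up, down)  # rank-2 update scaled by alpha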
| 689
|
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 155
| 0
|
"""simple docstring"""
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a sorted list of integers, return the indices of the two numbers that
    add up to target, or an empty list if no such pair exists.
    """
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{two_pointer([2, 7, 11, 15], 9) = }')
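# Trace for the example above: nums = [2, 7, 11, 15], target = 9; the pointers start
# at (0, 3), 2 + 15 and 2 + 11 both exceed 9 so j moves left twice, then
# 2 + 7 == 9 and the function returns [0, 1].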
| 12
|
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
def _a ( self ) -> Tuple:
"""simple docstring"""
return TaConfig.from_pretrained("""google/umt5-base""" )
def _a ( self , _a , _a , _a , _a=None , _a=None , _a=None , _a=None , _a=None , ) -> Any:
"""simple docstring"""
if attention_mask is None:
SCREAMING_SNAKE_CASE__ : List[str] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE__ : int = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
SCREAMING_SNAKE_CASE__ : str = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=_a )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE__ : List[str] = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=_a )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=_a )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
SCREAMING_SNAKE_CASE__ : Tuple = input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_config()
SCREAMING_SNAKE_CASE__ : List[str] = config.num_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_inputs_dict(_a , _a , _a )
return config, input_dict
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.prepare_config_and_inputs()
return config, inputs_dict
def _a ( self ) -> List[str]:
"""simple docstring"""
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _a ( self ) -> List[Any]:
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _a ( self , _a , _a , _a , _a , _a , _a , ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = UMTaModel(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = model(
input_ids=_a , decoder_input_ids=_a , attention_mask=_a , decoder_attention_mask=_a , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(input_ids=_a , decoder_input_ids=_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = result.last_hidden_state
SCREAMING_SNAKE_CASE__ : Dict = result.past_key_values
SCREAMING_SNAKE_CASE__ : Any = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(_a ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _a ( self , _a , _a , _a , _a , _a , _a , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = UMTaModel(config=_a ).get_decoder().to(_a ).eval()
# first forward pass
SCREAMING_SNAKE_CASE__ : str = model(_a , use_cache=_a )
SCREAMING_SNAKE_CASE__ : str = model(_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a , use_cache=_a )
self.parent.assertTrue(len(_a ) == len(_a ) )
self.parent.assertTrue(len(_a ) == len(_a ) + 1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a )["""last_hidden_state"""]
SCREAMING_SNAKE_CASE__ : Tuple = model(_a , past_key_values=_a )["""last_hidden_state"""]
# select random slice
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Optional[Any] = output_from_no_past[:, -1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : List[Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a , _a , atol=1E-3 ) )
def _a ( self , _a , _a , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = UMTaModel(config=_a ).to(_a ).half().eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**_a )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(_a ).any().item() )
@require_torch
class __a (UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Union[str, Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_SCREAMING_SNAKE_CASE :Optional[int] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE :List[str] = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE :Union[str, Any] = True
_SCREAMING_SNAKE_CASE :Tuple = False
_SCREAMING_SNAKE_CASE :Optional[Any] = False
_SCREAMING_SNAKE_CASE :List[Any] = True
_SCREAMING_SNAKE_CASE :List[str] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_SCREAMING_SNAKE_CASE :Union[str, Any] = [0.8, 0.9]
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : Dict = UMTaModel(config_and_inputs[0] ).to(_a )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_a , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=_a , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_a )
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : List[Any] = config_and_inputs[0]
SCREAMING_SNAKE_CASE__ : Tuple = UMTaForConditionalGeneration(_a ).eval()
model.to(_a )
SCREAMING_SNAKE_CASE__ : List[str] = {
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=_a ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=_a ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=_a ),
}
for attn_name, (name, mask) in zip(_a , head_masking.items() ):
SCREAMING_SNAKE_CASE__ : List[str] = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
SCREAMING_SNAKE_CASE__ : str = torch.ones(
config.num_decoder_layers , config.num_heads , device=_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=_a , return_dict_in_generate=_a , **_a , )
# We check the state of decoder_attentions and cross_attentions just from the last step
SCREAMING_SNAKE_CASE__ : List[str] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def _a ( self ) -> Dict:
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __a (unittest.TestCase):
'''simple docstring'''
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=_a ).to(_a )
SCREAMING_SNAKE_CASE__ : str = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=_a , legacy=_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_a , return_tensors="""pt""" , padding=_a ).input_ids
# fmt: off
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[
[ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25_922, 256_299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333,61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(_a , _a )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.generate(input_ids.to(_a ) )
SCREAMING_SNAKE_CASE__ : int = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.batch_decode(_a )
self.assertEqual(_a , _a )
| 12
| 1
|
"""simple docstring"""
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """
    Return number + 2 if number and number + 2 form a twin prime pair,
    otherwise return -1.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
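# Examples: twin_prime(3) == 5 and twin_prime(5) == 7, while twin_prime(4) == -1
# because 4 is not prime, and twin_prime(7) == -1 because 9 is not prime.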
| 213
|
"""simple docstring"""
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_a : int = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class GPTSwaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
def __A ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : Any = GPTSwaTokenizer(a__ , eos_token="""<unk>""" , bos_token="""<unk>""" , pad_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = """This is a test"""
_lowerCAmelCase : Optional[int] = """This is a test"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : List[Any] = """<s>"""
_lowerCAmelCase : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __A ( self ):
_lowerCAmelCase : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(a__ ) , 2000 )
def __A ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
def __A ( self ):
_lowerCAmelCase : Any = GPTSwaTokenizer(a__ )
_lowerCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(a__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [465, 287, 265, 631, 842] )
_lowerCAmelCase : Tuple = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
# fmt: off
self.assertListEqual(
a__ , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] , )
# fmt: on
_lowerCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(
a__ , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
_lowerCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(a__ )
# fmt: off
self.assertListEqual(
a__ , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
# fmt: on
def __A ( self ):
_lowerCAmelCase : Optional[Any] = GPTSwaTokenizer(a__ )
_lowerCAmelCase : str = ["""This is a test""", """I was born in 92000, and this is falsé."""]
_lowerCAmelCase : List[Any] = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(a__ , a__ ):
self.assertListEqual(tokenizer.encode_fast(a__ ) , a__ )
# Test that decode_fast returns the input text
for text, token_ids in zip(a__ , a__ ):
self.assertEqual(tokenizer.decode_fast(a__ ) , a__ )
@slow
def __A ( self ):
_lowerCAmelCase : str = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
_lowerCAmelCase : List[Any] = {"""input_ids""": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=a__ , )
| 213
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data  # value stored in the node
        self.next: Node[T] | None = None  # node underneath this one

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack implemented on top of a singly linked list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
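# Illustrative session with the stack above:
#
#     stack = LinkedStack[int]()
#     for value in (1, 2, 3):
#         stack.push(value)
#     str(stack)    # "3->2->1"  (top first)
#     stack.pop()   # 3
#     stack.peek()  # 2
#     len(stack)    # 2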
| 719
|
"""simple docstring"""
from copy import deepcopy
class FenwickTree:
    """Fenwick tree (binary indexed tree) supporting point updates and prefix sums."""

    def __init__(self, arr=None, size=None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr) -> None:
        """Initialize the tree from an array in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list:
        """Recover the underlying array in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        """Add value to arr[index] in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set arr[index] to value in O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Sum of arr[0:right] in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Sum of arr[left:right] in O(log n)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        """Value of arr[index] in O(log n)."""
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Largest index i such that the prefix sum up to i does not exceed value, or -1."""
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
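# Illustrative use of FenwickTree: prefix(i) sums arr[0:i] and query(l, r) sums arr[l:r].
#
#     f = FenwickTree(arr=[1, 2, 3, 4, 5])
#     f.prefix(3)    # 6  (1 + 2 + 3)
#     f.query(1, 4)  # 9  (2 + 3 + 4)
#     f.add(0, 10)   # arr[0] becomes 11
#     f.prefix(5)    # 25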
| 133
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"""EAGER""",
"""AOT_EAGER""",
"""INDUCTOR""",
"""NVFUSER""",
"""AOT_NVFUSER""",
"""AOT_CUDAGRAPHS""",
"""OFI""",
"""FX2TRT""",
"""ONNXRT""",
"""IPEX""",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that removes the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
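# Quick illustrative checks of the yes/no converter above (no interactive prompt needed):
#
#     _convert_yes_no_to_bool("YES")  # True
#     _convert_yes_no_to_bool("no")   # False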
| 12
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class _lowercase :
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_=13 ,lowerCamelCase_=7 ,lowerCamelCase_=True ,lowerCamelCase_=True ,lowerCamelCase_=True ,lowerCamelCase_=True ,lowerCamelCase_=99 ,lowerCamelCase_=32 ,lowerCamelCase_=2 ,lowerCamelCase_=4 ,lowerCamelCase_=37 ,lowerCamelCase_="gelu" ,lowerCamelCase_=0.1 ,lowerCamelCase_=0.1 ,lowerCamelCase_=512 ,lowerCamelCase_=16 ,lowerCamelCase_=2 ,lowerCamelCase_=0.02 ,lowerCamelCase_=3 ,lowerCamelCase_=4 ,lowerCamelCase_=None ,) -> str:
'''simple docstring'''
UpperCAmelCase__ : Any = parent
UpperCAmelCase__ : str = 13
UpperCAmelCase__ : Any = 7
UpperCAmelCase__ : str = True
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : List[Any] = 99
UpperCAmelCase__ : str = 32
UpperCAmelCase__ : Dict = 2
UpperCAmelCase__ : Union[str, Any] = 4
UpperCAmelCase__ : Dict = 37
UpperCAmelCase__ : Dict = '''gelu'''
UpperCAmelCase__ : List[str] = 0.1
UpperCAmelCase__ : List[str] = 0.1
UpperCAmelCase__ : Optional[int] = 512
UpperCAmelCase__ : Any = 16
UpperCAmelCase__ : List[Any] = 2
UpperCAmelCase__ : Optional[Any] = 0.02
UpperCAmelCase__ : Optional[int] = 3
UpperCAmelCase__ : Optional[int] = 4
UpperCAmelCase__ : Optional[int] = None
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase__ : List[str] = None
if self.use_input_mask:
UpperCAmelCase__ : int = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : str = None
if self.use_token_type_ids:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : Any = None
UpperCAmelCase__ : Dict = None
if self.use_labels:
UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase__ : Union[str, Any] = RoFormerConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,return_dict=lowerCamelCase_ ,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> int:
'''simple docstring'''
UpperCAmelCase__ : List[str] = TFRoFormerModel(config=lowerCamelCase_ )
UpperCAmelCase__ : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase__ : str = [input_ids, input_mask]
UpperCAmelCase__ : Any = model(lowerCamelCase_ )
UpperCAmelCase__ : List[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> Tuple:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Any = TFRoFormerForCausalLM(config=lowerCamelCase_ )
UpperCAmelCase__ : List[str] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase__ : int = model(lowerCamelCase_ )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) ,[self.batch_size, self.seq_length, self.vocab_size] )
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> List[str]:
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = TFRoFormerForMaskedLM(config=lowerCamelCase_ )
UpperCAmelCase__ : Dict = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase__ : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.num_labels
UpperCAmelCase__ : List[Any] = TFRoFormerForSequenceClassification(config=lowerCamelCase_ )
UpperCAmelCase__ : List[Any] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase__ : Union[str, Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ : int = self.num_choices
UpperCAmelCase__ : Optional[int] = TFRoFormerForMultipleChoice(config=lowerCamelCase_ )
UpperCAmelCase__ : Tuple = tf.tile(tf.expand_dims(lowerCamelCase_ ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase__ : Optional[Any] = tf.tile(tf.expand_dims(lowerCamelCase_ ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase__ : Dict = tf.tile(tf.expand_dims(lowerCamelCase_ ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase__ : Optional[int] = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCAmelCase__ : str = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_case_name == "TextGenerationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
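# The expected values above follow the standard sinusoidal position-embedding
# formula. A minimal NumPy sketch of how such a table can be built (an
# illustration, not the library's exact implementation):
#
#   weight[pos, i]           = sin(pos / 10000**(2i / dim))   for the first half
#   weight[pos, dim//2 + i]  = cos(pos / 10000**(2i / dim))   for the second half
#
# RoFormer stores all sines in the first half of the last axis and all cosines
# in the second half, which is why row 0 above reads [0, 0, 0, 1, 1, 1].
def _sinusoidal_table_sketch(num_positions: int, dim: int):
    import numpy as np

    positions = np.arange(num_positions)[:, None]
    inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
    angles = positions * inv_freq[None, :]
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=-1)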
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2, 12, 16, 64 are the batch size, number of heads, sequence length and head size
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        expected_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        expected_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key_layer, atol=self.tolerance)
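# A minimal NumPy sketch of the rotary update verified above (an illustration,
# not the exact library implementation). The sinusoidal table stores sines in
# the first half of the last axis and cosines in the second half; each value is
# repeated across a pair of feature dimensions and combined with a
# "rotate every two" of the input.
def _apply_rotary_sketch(x, sinusoidal_pos):
    import numpy as np

    sin, cos = np.split(sinusoidal_pos, 2, axis=-1)
    sin_pos = np.repeat(sin, 2, axis=-1)  # (s0, s0, s1, s1, ...)
    cos_pos = np.repeat(cos, 2, axis=-1)
    # rotate_every_two: pairs (x0, x1) become (-x1, x0)
    rotated = np.stack([-x[..., 1::2], x[..., 0::2]], axis=-1).reshape(x.shape)
    return x * cos_pos + rotated * sin_pos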
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
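# A small illustration of the behaviour exercised above (assuming a local
# `transformers` installation with the `roberta-base` files available): with
# `add_prefix_space=True` the first word is encoded as if preceded by a space,
# so it maps to the "Ġ"-prefixed byte-level BPE token.
#
#   from transformers import RobertaTokenizer
#
#   tok = RobertaTokenizer.from_pretrained("roberta-base")
#   tok.tokenize("hello world")                         # ['hello', 'Ġworld']
#   tok.tokenize("hello world", add_prefix_space=True)  # ['Ġhello', 'Ġworld']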
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create a solution object to save the path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and blocked points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark as visited
            solutions[i][j] = 1

            # check all four directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            # backtrack
            solutions[i][j] = 0
            return False
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
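# Example usage (0 = free cell, 1 = blocked cell); the solver prints the
# 0/1 solution matrix when a path from (0, 0) to (n-1, n-1) exists:
#
#   maze = [
#       [0, 1, 0],
#       [0, 0, 0],
#       [1, 0, 0],
#   ]
#   solve_maze(maze)  # prints the path matrix row by row and returns True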
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
    load_gpt2,
    recopy_gpt2,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)

                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
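# For reference, a minimal sketch of what a fixed-context perplexity
# computation could look like. `compute_perplexity` itself is imported from
# igf.igf above; its actual implementation may differ from this sketch.
def _perplexity_sketch(model, dataset, device="cpu"):
    total_loss, batches = 0.0, 0
    model.eval()
    with torch.no_grad():
        for context in dataset:
            context = context.to(device)
            # language-modeling loss with the context as its own label
            loss = model(context, labels=context)[0]
            total_loss += loss.item()
            batches += 1
    # perplexity is the exponential of the mean cross-entropy
    return float(np.exp(total_loss / max(batches, 1)))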
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain data files for WikiText."
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models"
    )
    parser.add_argument(
        "--data_file", type=str, default=None, help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file", type=str, default=None, help="A jbl file containing the context and information gain pairs to train secondary learner."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the final fine-tuned model is stored."
    )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name"
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=32, type=int, help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set", default=100, type=int, help="number of articles that are long enough to be used as our objective set"
    )
    parser.add_argument("--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq")
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int, help="batch size of training data for secondary learner"
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval", default=10, type=int, help=(
            "decay the selectivity of our secondary learner filter from "
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float, help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model", default=recopy_gpt2, type=str, help="Reset the model to the original pretrained GPT-2 weights after each iteration"
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )


if __name__ == "__main__":
    main()
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter = {freq: [] for letter, freq in letter_to_freq.items()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score


if __name__ == "__main__":
    import doctest

    doctest.testmod()
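# Example usage: the match score counts how many of the six most and six least
# frequent letters of the text agree with English ("ETAOIN" / "VKJXQZ").
#
#   message = "Alan Mathison Turing was an English mathematician and computer scientist."
#   get_letter_count(message)["A"]        # occurrences of 'A' (case-insensitive)
#   order = get_frequency_order(message)  # 26-letter string, most frequent first
#   english_freq_match_score(message)     # 0..12; higher means more English-like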
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")
    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
a_ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
a_ : Tuple = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
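# Example of what `clean_doc_toc` does to a section: duplicates are merged,
# entries are sorted by title, and any "Overview" page is moved to the front.
#
#   docs = [
#       {"local": "api/ddim", "title": "DDIM"},
#       {"local": "overview", "title": "Overview"},
#       {"local": "api/ddim", "title": "DDIM"},
#   ]
#   clean_doc_toc(docs)
#   # -> [{"local": "overview", "title": "Overview"}, {"local": "api/ddim", "title": "DDIM"}]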
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))

            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_new_dynamic_config_registration(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        # Extracts log-mel filterbank features for one waveform array (unbatched).
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(
        self,
        audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def _process_audio(
        self,
        speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        is_target: bool = False,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
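# A minimal usage sketch, assuming this class is exposed as
# SpeechT5FeatureExtractor (the shapes and names below are illustrative
# assumptions, not guaranteed outputs):
#
#   import numpy as np
#
#   extractor = SpeechT5FeatureExtractor()
#   waveform = np.zeros(16000, dtype=np.float32)  # 1 s of silence at 16 kHz
#   inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
#   inputs["input_values"].shape   # raw waveform batch, e.g. (1, 16000)
#   targets = extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np")
#   targets["input_values"].shape  # log-mel frames, e.g. (1, num_frames, num_mel_bins)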
import pytest

import datasets


# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 720
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
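# Illustrative usage sketch (hypothetical, not part of the original file; assumes the three
# config classes above are importable exactly as defined here):
#
#     config = InstructBlipConfig()  # builds default vision / Q-Former / OPT sub-configs
#     assert config.to_dict()["model_type"] == "instructblip"
#     composed = InstructBlipConfig.from_vision_qformer_text_configs(
#         InstructBlipVisionConfig(), InstructBlipQFormerConfig(), config.text_config
#     )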
| 245
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 11
|
'''simple docstring'''
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 396
| 0
|
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}


def decimal_to_hexadecimal(decimal: float) -> str:
    """Take an integer value (possibly negative) and return its hexadecimal representation."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
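# A few sanity checks for the converter above (added for illustration; the values are
# easy to verify by hand, e.g. 255 = 15 * 16 + 15 -> "ff"):
assert decimal_to_hexadecimal(5) == "0x5"
assert decimal_to_hexadecimal(255) == "0xff"
assert decimal_to_hexadecimal(-256) == "-0x100"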
if __name__ == "__main__":
import doctest
doctest.testmod()
| 601
|
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
SCREAMING_SNAKE_CASE__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
# -------------------------- default selection --------------------------
# rotors --------------------------
SCREAMING_SNAKE_CASE__ = """EGZWVONAHDCLFQMSIPJBYUKXTR"""
SCREAMING_SNAKE_CASE__ = """FOBHMDKEXQNRAULPGSJVTYICZW"""
SCREAMING_SNAKE_CASE__ = """ZJXESIUQLHAVRMDOYGTNFWPBKC"""
# reflector --------------------------
reflector = {
"""A""": """N""",
"""N""": """A""",
"""B""": """O""",
"""O""": """B""",
"""C""": """P""",
"""P""": """C""",
"""D""": """Q""",
"""Q""": """D""",
"""E""": """R""",
"""R""": """E""",
"""F""": """S""",
"""S""": """F""",
"""G""": """T""",
"""T""": """G""",
"""H""": """U""",
"""U""": """H""",
"""I""": """V""",
"""V""": """I""",
"""J""": """W""",
"""W""": """J""",
"""K""": """X""",
"""X""": """K""",
"""L""": """Y""",
"""Y""": """L""",
"""M""": """Z""",
"""Z""": """M""",
}
# -------------------------- extra rotors --------------------------
SCREAMING_SNAKE_CASE__ = """RMDJXFUWGISLHVTCQNKYPBEZOA"""
SCREAMING_SNAKE_CASE__ = """SGLCPQWZHKXAREONTFBVIYJUDM"""
SCREAMING_SNAKE_CASE__ = """HVSICLTYKQUBXDWAJZOMFGPREN"""
SCREAMING_SNAKE_CASE__ = """RZWQHFMVDBKICJLNTUXAGYPSOE"""
SCREAMING_SNAKE_CASE__ = """LFKIJODBEGAMQPXVUHYSTCZRWN"""
SCREAMING_SNAKE_CASE__ = """KOAEGVDHXPQZMLFTYWJNBRCIUS"""
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)
    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)
    # Validates string and returns dict
    pbdict = _plugboard(pb)
    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}
    pbstring.replace(" ", "")
    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl
    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
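# Example of the symmetric mapping _plugboard builds (illustrative; easy to verify by hand):
#   _plugboard("") == {}
#   _plugboard("AB") == {"A": "B", "B": "A"}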
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(rotor_position, rotor_selection, plugb.upper())
    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1
    result = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]
            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]
            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]
            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]
            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0
        # else:
        #     pass
        #     Error could be also raised:
        #     raise ValueError('Invalid symbol(' + repr(symbol) + ')')
        result.append(symbol)
    return "".join(result)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = """This is my Python script that emulates the Enigma machine from WWII."""
SCREAMING_SNAKE_CASE__ = (1, 1, 1)
SCREAMING_SNAKE_CASE__ = """pictures"""
SCREAMING_SNAKE_CASE__ = (rotora, rotora, rotora)
SCREAMING_SNAKE_CASE__ = enigma(message, rotor_pos, rotor_sel, pb)
print("""Encrypted message:""", en)
print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
| 601
| 1
|
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION ='\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
_DESCRIPTION ='\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
_KWARGS_DESCRIPTION ='\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    \'google_bleu\': google_bleu score\n\nExamples:\n    Example 1:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.44\n\n    Example 2:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.61\n\n    Example 3:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n        >>> print(round(results["google_bleu"], 2))\n        0.53\n\n    Example 4:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n        >>> print(round(results["google_bleu"], 2))\n        0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 135
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()
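# Illustration of padding_tensor as reconstructed above (hand-checked example values):
#   padding_tensor([[1, 2], [3]], -1, "right", 3) -> [[1, 2, -1], [3, -1, -1]]
# A tuple padding_value (e.g. (-1, -1)) pads a trailing pair dimension instead, as used
# for the entity spans below.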
def is_punctuation(char):
    cp = ord(char)
    # Treat all non-letter/number ASCII as punctuation; characters such as "^", "$"
    # and "`" are not in the Unicode Punctuation class but are treated as punctuation anyway.
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # conversion to tensors will fail if we have labels as they are not of the same length yet
            return_tensors="pt" if labels is None else None,
        )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
| 43
| 0
|
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]
                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]
            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)
        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)
        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)
        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")
        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
| 718
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 8
| 0
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjancency = defaultdict(list)
    for node1, node2, cost in edges:
        adjancency[node1].append([node2, cost])
        adjancency[node2].append([node1, cost])
    result = mst(adjancency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 305
|
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1_000_000) -> int:
    """Return the sum of all numbers below n that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 305
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85,
7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77,
13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11,
46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86,
1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91,
1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09,
3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73,
8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27,
32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47,
72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93,
1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75,
2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65,
4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
| 460
|
'''simple docstring'''
def heaps(arr: list) -> list:
    """Return all permutations of a list using the iterative form of Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
| 460
| 1
|
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
| 227
|
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image Classification mapping
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
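# Usage sketch (editor addition; assumes flax is installed and the checkpoint
# ships Flax weights):
#
#   from transformers import FlaxAutoModel
#   model = FlaxAutoModel.from_pretrained("bert-base-uncased")
#
# _LazyAutoMapping resolves the config's model type (here "bert") to the
# concrete class (FlaxBertModel) only when it is first needed.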
| 326
| 0
|
"""simple docstring"""
import functools
def min_edit_distance(word1: str, word2: str) -> int:
    """
    Minimum number of single-character insertions, deletions and substitutions
    needed to turn ``word1`` into ``word2`` (Levenshtein distance).

    >>> min_edit_distance("intention", "execution")
    5
    >>> min_edit_distance("intention", "")
    9
    >>> min_edit_distance("", "")
    0
    """
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if the first word is exhausted - insert the rest of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if the second word is exhausted - delete the rest of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
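# Complexity note (editor addition): functools.cache memoizes the recursion,
# so there are at most (len(word1) + 1) * (len(word2) + 1) distinct states.
#
#   min_edit_distance("kitten", "sitting")  # -> 3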
| 256
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32, )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, )
        vocoder = SpeechT5HifiGan(vocoder_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033])
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs, )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
            text_inputs = text_inputs["input_ids"].to(torch_device)
            text_embeds = audioldm_pipe.text_encoder(
                text_inputs, )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)
            embeds.append(text_embeds)
        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032])
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate
        inputs = self.get_dummy_inputs(device)

        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs
    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81920
        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2
    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81920
        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
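# Inference sketch (editor addition; assumes a CUDA device and the public
# "cvssp/audioldm" checkpoint exercised by the slow tests above):
#
#   import torch
#   from diffusers import AudioLDMPipeline
#
#   pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm", torch_dtype=torch.float16).to("cuda")
#   audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10).audios[0]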
| 256
| 1
|
'''simple docstring'''
def solution(n: int = 4_000_000) -> int:
    """Sum of the even-valued Fibonacci terms that do not exceed ``n``."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f"""{solution() = }""")
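# Worked check (editor note): the even Fibonacci terms below four million are
# 2, 8, 34, 144, 610, 2584, 10946, 46368, 196418, 832040 and 3524578, so
# solution() returns 4613732 (Project Euler problem 2).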
| 71
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
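# Usage sketch (editor addition): with the lazy structure above, submodules are
# imported only on first attribute access, e.g.
#
#   from transformers import BlipProcessor, BlipForConditionalGeneration
#
# which pulls in the torch branch of _import_structure on demand.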
| 191
| 0
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1_000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]], )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=tf.convert_to_tensor([1, 1]), )
        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels)
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
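# Usage sketch (editor addition; mirrors the integration test above and assumes
# the public "microsoft/layoutlm-base-uncased" checkpoint, hidden size 768):
#
#   model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
#   input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
#   outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
#   print(outputs.last_hidden_state.shape)  # (2, 25, 768)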
| 45
|
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(self, value_function: UNet1DModel, unet: UNet1DModel, scheduler: DDPMScheduler, env, ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]
                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]
            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y
    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
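# Usage sketch (editor addition; assumes a D4RL-style environment exposing
# get_dataset(), plus pretrained value-function and dynamics UNets):
#
#   pipeline = ValueGuidedRLPipeline(value_function, unet, scheduler, env)
#   obs = env.reset()
#   action = pipeline(obs, planning_horizon=32)  # best-scoring first action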
| 45
| 1
|
'''simple docstring'''
def solution(limit: int = 28123) -> int:
    """Sum of all positive integers that cannot be written as the sum of two abundant numbers."""
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
if __name__ == "__main__":
print(solution())
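# Worked note (editor addition): it is known that every integer greater than
# 28123 can be written as the sum of two abundant numbers, so the search is
# capped there; the smallest abundant number is 12, and 24 = 12 + 12 is the
# smallest such sum (Project Euler problem 23).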
| 459
|
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
_UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
_UpperCamelCase = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self, notes_encoder: SpectrogramNotesEncoder, continuous_encoder: SpectrogramContEncoder, decoder: T5FilmDecoder, scheduler: DDPMScheduler, melgan: OnnxRuntimeModel if is_onnx_available() else Any, ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to the network's output range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling network outputs back to the features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask)
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask)
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps)
        return logits
    @torch.no_grad()
    def __call__(
        self, input_tokens: List[List[int]], generator: Optional[torch.Generator] = None, num_inference_steps: int = 100, return_dict: bool = True, output_type: str = "numpy", callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                f' {type(callback_steps)}.')

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype)
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True)

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device), continuous_inputs=encoder_continuous_inputs, continuous_mask=encoder_continuous_mask, )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape, generator=generator, device=self.device, dtype=self.decoder.dtype, )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks, input_tokens=x, noise_time=t / self.scheduler.config.num_train_timesteps, )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info('Generated segment', i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.")
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.")

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
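# Usage sketch (editor addition; assumes the "google/music-spectrogram-diffusion"
# checkpoint and note tokens produced by the accompanying MIDI processor):
#
#   pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#   output = pipe(input_tokens, num_inference_steps=100)
#   audio = output.audios[0]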
| 459
| 1
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Visual-Attention-Network/van-base''': (
'''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
),
}
class VanConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a VAN (Visual Attention Network) model.
    """

    model_type = "van"

    def __init__(
        self, image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4], hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-6, layer_scale_init_value=1e-2, drop_path_rate=0.0, dropout_rate=0.0, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
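# Usage sketch (editor addition): instantiating a config with the defaults
# above; the model class itself lives in transformers' (now deprecated) VAN
# module, so the import path may vary by version:
#
#   config = VanConfig()
#   # from transformers import VanModel  # older versions
#   # model = VanModel(config)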
| 322
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizerFast( BertTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast( BertTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
DPRReaderOutput = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
CUSTOM_DPR_READER_DOCSTRING = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
    padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
        Activates and controls padding. Accepts the following values:
        - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
          is provided).
        - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
          acceptable input length for the model if that argument is not provided.
        - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
          lengths).
    truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
        Activates and controls truncation. Accepts the following values:
        - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
          the maximum acceptable input length for the model if that argument is not provided. This will truncate
          token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
          of pairs) is provided.
        - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
          acceptable input length for the model if that argument is not provided. This will only truncate the first
          sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
        - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
          acceptable input length for the model if that argument is not provided. This will only truncate the
          second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
        - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
          greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
    return_tensors (`str` or [`~utils.TensorType`], *optional*):
        If set, will return tensors instead of list of python integers. Acceptable values are:
        - `'tf'`: Return TensorFlow `tf.constant` objects.
        - `'pt'`: Return PyTorch `torch.Tensor` objects.
        - `'np'`: Return Numpy `np.ndarray` objects.
    return_attention_mask (`bool`, *optional*):
        Whether or not to return the attention mask. If not set, will return the attention mask according to the
        specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
    '''simple docstring'''
    def __call__( self ,questions ,titles: Optional[str] = None ,texts: Optional[str] = None ,padding: Union[bool, str] = False ,truncation: Union[bool, str] = False ,max_length: Optional[int] = None ,return_tensors: Optional[Union[str, TensorType]] = None ,return_attention_mask: Optional[bool] = None ,**kwargs ,) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions ,padding=padding ,truncation=truncation ,max_length=max_length ,return_tensors=return_tensors ,return_attention_mask=return_attention_mask ,**kwargs ,)
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions ,text_pair ,padding=padding ,truncation=truncation ,max_length=max_length ,return_tensors=return_tensors ,return_attention_mask=return_attention_mask ,**kwargs ,)
        titles = titles if not isinstance(titles ,str ) else [titles]
        texts = texts if not isinstance(texts ,str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions ,str ) else [questions] * n_passages
        assert len(titles ) == len(
            texts ), F'''There should be as many titles as texts but got {len(titles )} titles and {len(texts )} texts.'''
        encoded_question_and_titles = super().__call__(questions ,titles ,padding=False ,truncation=False )["""input_ids"""]
        encoded_texts = super().__call__(texts ,add_special_tokens=False ,padding=False ,truncation=False )["""input_ids"""]
        encoded_inputs = {
            """input_ids""": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles ,encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs ,padding=padding ,max_length=max_length ,return_tensors=return_tensors )
    def decode_best_spans( self ,reader_input: BatchEncoding ,reader_output: DPRReaderOutput ,num_spans: int = 16 ,max_answer_length: int = 64 ,num_spans_per_passage: int = 4 ,) -> List[DPRSpanPrediction]:
        input_ids = reader_input["""input_ids"""]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) ,reverse=True ,key=relevance_logits.__getitem__ )
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id ,2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=max_answer_length ,top_spans=num_spans_per_passage ,)
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=doc_id ,start_index=start_index ,end_index=end_index ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
            if len(nbest_spans_predictions ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self ,start_logits: List[int] ,end_logits: List[int] ,max_answer_length: int ,top_spans: int ,) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores ,key=lambda x: x[1] ,reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, F'''Wrong span indices: [{start_index}:{end_index}]'''
            length = end_index - start_index + 1
            assert length <= max_answer_length, F'''Span is too long: {length} > {max_answer_length}'''
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
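# A quick standalone illustration of the greedy selection implemented by
# `_get_best_spans` above: score every span up to `max_answer_length`, sort
# by score, and keep the best spans that do not nest inside (or contain) an
# already chosen one. `_demo_span_selection` is a hypothetical helper with
# toy logits, not part of the tokenizer API:
def _demo_span_selection(start_logits=(0.1, 2.0, 0.3), end_logits=(0.2, 0.1, 1.5), max_answer_length=2, top_spans=2):
    scores = []
    for start_index, start_score in enumerate(start_logits):
        for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
            scores.append(((start_index, start_index + answer_length), start_score + end_score))
    scores = sorted(scores, key=lambda x: x[1], reverse=True)
    chosen = []
    for (start_index, end_index), _ in scores:
        # skip spans nested inside (or containing) an already selected span
        if any(s <= start_index <= end_index <= e or start_index <= s <= e <= end_index for (s, e) in chosen):
            continue
        chosen.append((start_index, end_index))
        if len(chosen) == top_spans:
            break
    return chosen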
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizerFast( CustomDPRReaderTokenizerMixin ,BertTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
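# A minimal usage sketch for the reader tokenizer above (the checkpoint name
# is the public DPR reader; kept as comments since running it downloads
# weights, so treat it as an illustrative sketch rather than a tested snippet):
#
#     from transformers import DPRReader
#     tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded = tokenizer(
#         questions="What is love?",
#         titles="Haddaway",
#         texts="'What Is Love' is a song recorded by Haddaway",
#         return_tensors="pt",
#     )
#     outputs = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")(**encoded)
#     predictions = tokenizer.decode_best_spans(encoded, outputs)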
| 322
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_sw3'] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
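# For context: `_LazyModule` defers the actual submodule imports until an
# attribute is first accessed, so importing the package stays cheap even when
# heavy optional backends are installed. A minimal sketch of the same
# deferred-import idea using only the standard library (illustrative, not
# part of this __init__; `json.dumps` is just an arbitrary target):
#
#     import importlib
#     class LazyAttr:
#         def __init__(self, module_name, attr):
#             self._module_name, self._attr, self._obj = module_name, attr, None
#         def resolve(self):
#             if self._obj is None:  # import happens on first use only
#                 self._obj = getattr(importlib.import_module(self._module_name), self._attr)
#             return self._obj
#     dumps = LazyAttr("json", "dumps")
#     assert dumps.resolve()({"a": 1}) == '{"a": 1}'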
| 462
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowercase_ ( __snake_case , unittest.TestCase ):
_lowerCamelCase = ReformerTokenizer
_lowerCamelCase = ReformerTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = True
def UpperCamelCase ( self ):
super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self ):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def UpperCamelCase ( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(vocab_keys ) , 1_000 )
def UpperCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def UpperCamelCase ( self ):
if not self.test_rust_tokenizer:
return
_snake_case : Tuple = self.get_tokenizer()
_snake_case : List[str] = self.get_rust_tokenizer()
_snake_case : int = "I was born in 92000, and this is falsé."
_snake_case : Tuple = tokenizer.tokenize(lowercase_ )
_snake_case : List[Any] = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_snake_case : str = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
_snake_case : Tuple = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_snake_case : Dict = self.get_rust_tokenizer()
_snake_case : List[Any] = tokenizer.encode(lowercase_ )
_snake_case : str = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def UpperCamelCase ( self , lowercase_=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
# Simple input
_snake_case : List[str] = "This is a simple input"
_snake_case : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
_snake_case : Union[str, Any] = ("This is a simple input", "This is a pair")
_snake_case : int = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Simple input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Simple input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Pair input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
_snake_case : Dict = ReformerTokenizer(lowercase_ , keep_accents=lowercase_ )
_snake_case : Tuple = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowercase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [285, 46, 10, 170, 382] , )
_snake_case : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_snake_case : Any = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_snake_case : List[Any] = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def UpperCamelCase ( self ):
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def UpperCamelCase ( self ):
_snake_case : int = "Hello World!"
_snake_case : Dict = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def UpperCamelCase ( self ):
_snake_case : Optional[int] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
_snake_case : Dict = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@require_torch
@slow
def UpperCamelCase ( self ):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
        tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = " ".join(tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="pt" )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config )
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
def UpperCamelCase ( self ):
# fmt: off
_snake_case : Union[str, Any] = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
_snake_case : Tuple = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=lowercase_ , sequences=lowercase_ , )
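# For context on the axial trick exercised in the torch test above: Reformer
# factorizes its position embeddings over a 2D grid, so the (padded) sequence
# length must equal d1 * d2 when `config.axial_pos_shape = (d1, d2)`, and
# position p is embedded via the pair (p // d2, p % d2). A quick standalone
# illustration of that factorization (`_axial_position` is a hypothetical
# helper, plain Python only):
def _axial_position(p, axial_shape):
    d1, d2 = axial_shape
    assert p < d1 * d2, "position outside the factorized grid"
    return p // d2, p % d2
# With the pretrained default shape (512, 1024), position 2049 maps to row 2, column 1:
assert _axial_position(2049, (512, 1024)) == (2, 1)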
| 670
| 0
|
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
a = logging.get_logger(__name__)
class BarkProcessor( ProcessorMixin ):
'''simple docstring'''
    tokenizer_class = '''AutoTokenizer'''
    attributes = ['''tokenizer''']
    preset_shape = {
        '''semantic_prompt''': 1,
        '''coarse_prompt''': 2,
        '''fine_prompt''': 2,
    }
    def __init__( self , tokenizer , speaker_embeddings=None ):
        super().__init__(tokenizer )
        self.speaker_embeddings = speaker_embeddings
@classmethod
    def from_pretrained( cls , pretrained_processor_name_or_path , speaker_embeddings_dict_path="speaker_embeddings_path.json" , **kwargs ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path , speaker_embeddings_dict_path , subfolder=kwargs.pop('subfolder' , None ) , cache_dir=kwargs.pop('cache_dir' , None ) , force_download=kwargs.pop('force_download' , False ) , proxies=kwargs.pop('proxies' , None ) , resume_download=kwargs.pop('resume_download' , None ) , local_files_only=kwargs.pop('local_files_only' , False ) , use_auth_token=kwargs.pop('use_auth_token' , None ) , revision=kwargs.pop('revision' , None ) , )
            if speaker_embeddings_path is None:
                logger.warning(
                    F'''`{os.path.join(pretrained_processor_name_or_path , speaker_embeddings_dict_path )}` does not exist,
                    no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path ) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json )
        else:
            speaker_embeddings = None
        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path , **kwargs )
        return cls(tokenizer=tokenizer , speaker_embeddings=speaker_embeddings )
    def save_pretrained( self , save_directory , speaker_embeddings_dict_path="speaker_embeddings_path.json" , speaker_embeddings_directory="speaker_embeddings" , push_to_hub: bool = False , **kwargs , ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory , speaker_embeddings_directory , 'v2' ) , exist_ok=True )
            embeddings_dict = {}
            embeddings_dict['repo_or_path'] = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key )
                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict['repo_or_path'] , speaker_embeddings_directory , F'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=False , )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory , F'''{prompt_key}_{key}.npy''' )
                    embeddings_dict[prompt_key] = tmp_dict
            with open(os.path.join(save_directory , speaker_embeddings_dict_path ) , 'w' ) as fp:
                json.dump(embeddings_dict , fp )
        super().save_pretrained(save_directory , push_to_hub , **kwargs )
    def _load_voice_preset( self , voice_preset: str = None , **kwargs ):
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
            path = get_file_from_repo(
                self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , None ) , cache_dir=kwargs.pop('cache_dir' , None ) , force_download=kwargs.pop('force_download' , False ) , proxies=kwargs.pop('proxies' , None ) , resume_download=kwargs.pop('resume_download' , None ) , local_files_only=kwargs.pop('local_files_only' , False ) , use_auth_token=kwargs.pop('use_auth_token' , None ) , revision=kwargs.pop('revision' , None ) , )
            if path is None:
                raise ValueError(
                    F'''`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist,
                    no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.''' )
            voice_preset_dict[key] = np.load(path )
        return voice_preset_dict
    def _validate_voice_preset_dict( self , voice_preset: Optional[dict] = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
    def __call__( self , text=None , voice_preset=None , return_tensors="pt" , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , **kwargs , ):
        if voice_preset is not None and not isinstance(voice_preset , dict ):
            if (
                isinstance(voice_preset , str )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset )
            else:
                if isinstance(voice_preset , str ) and not voice_preset.endswith('.npz' ):
                    voice_preset = voice_preset + '.npz'
                voice_preset = np.load(voice_preset )
        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset , **kwargs )
            voice_preset = BatchFeature(data=voice_preset , tensor_type=return_tensors )
        encoded_text = self.tokenizer(
            text , return_tensors=return_tensors , padding='max_length' , max_length=max_length , return_attention_mask=return_attention_mask , return_token_type_ids=return_token_type_ids , add_special_tokens=add_special_tokens , **kwargs , )
        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset
        return encoded_text
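# A minimal usage sketch for the processor above (the checkpoint and preset
# names below are the public suno/bark ones, used purely as an illustration;
# kept as comments since running it downloads files):
#
#     processor = BarkProcessor.from_pretrained("suno/bark-small")
#     inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#     # `inputs` holds the tokenized text, and `inputs["history_prompt"]` is a
#     # BatchFeature with the semantic/coarse/fine prompt arrays of the preset.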
| 505
|
"""simple docstring"""
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 505
| 1
|
"""simple docstring"""
import random
class Onepad:
    @staticmethod
    def encrypt( text: str):
        """Encrypt each character code with its own random key."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1 , 3_0_0)
            cipher.append((i + k) * k)
            key.append(k)
        return cipher, key
    @staticmethod
    def decrypt( cipher: list[int] , key: list[int]):
        """Invert encrypt: c = (i + k) * k, so i = (c - k**2) / k."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
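# Why decryption recovers the text: encrypt maps each code point i to
# c = (i + k) * k = i*k + k**2, so (c - k**2) / k gives back i exactly, and
# int() only guards against float rounding. A quick round-trip check of the
# two methods above:
if __name__ == "__main__":
    for sample in ("Hello", "one-time pad?", ""):
        c2, k2 = Onepad.encrypt(sample)
        assert Onepad.decrypt(c2, k2) == sample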
| 567
|
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial(n: int) -> int:
    """Return the sum of the factorials of the digits of n."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n) )
def solution() -> int:
    """Project Euler 34: sum all numbers that equal the sum of the factorials of their digits."""
    limit = 7 * factorial(9 ) + 1
    return sum(i for i in range(3 , limit ) if sum_of_digit_factorial(i ) == i )
if __name__ == "__main__":
    print(F"{solution() = }")
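# Why 7 * 9! + 1 is a safe search bound for `solution`: an n-digit number is
# at least 10**(n - 1), while its digit-factorial sum is at most n * 9!. For
# n = 8 we have 10**7 = 10,000,000 > 8 * 9! = 2,903,040, so no number with 8
# or more digits can equal its digit-factorial sum. A quick numeric check of
# that inequality (factorial is already imported above):
assert 10**7 > 8 * factorial(9)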
| 567
| 1
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def UpperCamelCase_ ( A__ , A__=False ):
a_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
a_ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def UpperCamelCase_ ( A__ , A__ , A__=False ):
for i in range(config.num_hidden_layers ):
if base_model:
a_ = """"""
else:
a_ = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
a_ = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' )
a_ = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
a_ = in_proj_weight[
: config.hidden_size, :
]
a_ = in_proj_bias[: config.hidden_size]
a_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
a_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
a_ = in_proj_weight[
-config.hidden_size :, :
]
a_ = in_proj_bias[-config.hidden_size :]
def UpperCamelCase_ ( A__ ):
a_ = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def UpperCamelCase_ ( A__ ):
# projection head is used in the self-supervised pre-training in MSN,
    # for downstream tasks it is not needed.
a_ = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def UpperCamelCase_ ( A__ , A__ , A__ ):
a_ = dct.pop(A__ )
a_ = val
def UpperCamelCase_ ( A__ , A__ ):
a_ = ViTMSNConfig()
a_ = 10_00
a_ = """datasets/huggingface/label-files"""
a_ = """imagenet-1k-id2label.json"""
a_ = json.load(open(hf_hub_download(A__ , A__ ) , """r""" ) )
a_ = {int(A__ ): v for k, v in idalabel.items()}
a_ = idalabel
a_ = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
a_ = 3_84
a_ = 15_36
a_ = 6
elif "l16" in checkpoint_url:
a_ = 10_24
a_ = 40_96
a_ = 24
a_ = 16
a_ = 0.1
elif "b4" in checkpoint_url:
a_ = 4
elif "l7" in checkpoint_url:
a_ = 7
a_ = 10_24
a_ = 40_96
a_ = 24
a_ = 16
a_ = 0.1
a_ = ViTMSNModel(A__ )
a_ = torch.hub.load_state_dict_from_url(A__ , map_location="""cpu""" )["""target_encoder"""]
a_ = ViTImageProcessor(size=config.image_size )
remove_projection_head(A__ )
a_ = create_rename_keys(A__ , base_model=A__ )
for src, dest in rename_keys:
rename_key(A__ , A__ , A__ )
read_in_q_k_v(A__ , A__ , base_model=A__ )
model.load_state_dict(A__ )
model.eval()
a_ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
a_ = Image.open(requests.get(A__ , stream=A__ ).raw )
a_ = ViTImageProcessor(
size=config.image_size , image_mean=A__ , image_std=A__ )
a_ = image_processor(images=A__ , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
a_ = model(**A__ )
a_ = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
a_ = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
a_ = torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
a_ = torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
a_ = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
a_ = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , A__ , atol=1e-4 )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(A__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(A__ )
if __name__ == "__main__":
lowercase__ =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowercase__ =parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
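# The `read_in_q_k_v` helper above splits timm's fused attention projection
# into the separate query/key/value tensors that the HF ViT layout expects:
# the fused weight stacks q, k and v along the first axis, giving shape
# (3 * hidden_size, hidden_size). A small self-contained illustration of that
# slicing (`_demo_qkv_split` is a hypothetical helper with a toy hidden size,
# not called by the conversion script):
def _demo_qkv_split(hidden_size=4):
    fused = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    q = fused[:hidden_size, :]
    k = fused[hidden_size : hidden_size * 2, :]
    v = fused[-hidden_size:, :]
    # the three slices tile the fused matrix exactly
    assert torch.equal(torch.cat([q, k, v]), fused)
    return q, k, v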
| 511
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowercase__ =16
lowercase__ =32
def get_dataloaders( accelerator , batch_size = 16 ):
a_ = AutoTokenizer.from_pretrained("""bert-base-cased""" )
a_ = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(A__ ):
# max_length=None => use the model max length (it's actually the default)
a_ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
a_ = datasets.map(
A__ , batched=A__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
a_ = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(A__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
a_ = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
a_ = 16
elif accelerator.mixed_precision != "no":
a_ = 8
else:
a_ = None
return tokenizer.pad(
A__ , padding="""longest""" , max_length=A__ , pad_to_multiple_of=A__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
a_ = DataLoader(
tokenized_datasets["""train"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
a_ = DataLoader(
tokenized_datasets["""validation"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowercase__ =mocked_dataloaders # noqa: F811
def training_function( config , args ):
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , A__ ) == "1":
a_ = 2
# Initialize accelerator
a_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a_ = config["""lr"""]
a_ = int(config["""num_epochs"""] )
a_ = int(config["""seed"""] )
a_ = int(config["""batch_size"""] )
a_ = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
a_ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
a_ = batch_size // MAX_GPU_BATCH_SIZE
a_ = MAX_GPU_BATCH_SIZE
set_seed(A__ )
a_ , a_ = get_dataloaders(A__ , A__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a_ = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=A__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a_ = model.to(accelerator.device )
# Instantiate optimizer
a_ = AdamW(params=model.parameters() , lr=A__ )
# Instantiate scheduler
a_ = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=1_00 , num_training_steps=(len(A__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a_ , a_ , a_ , a_ , a_ = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# Now we train the model
for epoch in range(A__ ):
model.train()
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
a_ = model(**A__ )
a_ = outputs.loss
a_ = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
a_ = 0
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
a_ = model(**A__ )
a_ = outputs.logits.argmax(dim=-1 )
a_ , a_ = accelerator.gather((predictions, batch["""labels"""]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(A__ ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
a_ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
a_ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=A__ , references=A__ , )
a_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , A__ )
def main():
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
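# The manual bookkeeping in the evaluation loop above exists because
# distributed samplers pad the final batch so every process sees the same
# number of samples; after `accelerator.gather` that duplicated tail must be
# dropped before metrics are computed (or use `accelerator.gather_for_metrics`,
# which does this for you). A minimal sketch of the truncation with plain
# tensors (`_demo_truncate_gathered` is a hypothetical helper, no distributed
# setup needed):
def _demo_truncate_gathered():
    gathered = torch.arange(8 )  # 8 gathered predictions, but the batch was padded
    dataset_len, samples_seen = 10, 4  # only 6 real samples remain in the dataset
    kept = gathered[: dataset_len - samples_seen]
    assert kept.tolist() == [0, 1, 2, 3, 4, 5]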
| 511
| 1
|
def binary_and(a: int , b: int ) -> str:
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1' ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
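# Quick checks for the function above: 25 = 0b11001 and 32 = 0b100000 share
# no 1-bit, so the AND is all zeros; 37 and 50 share only the 32 bit.
assert binary_and(25, 32) == "0b000000"
assert binary_and(37, 50) == "0b100000"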
if __name__ == "__main__":
import doctest
doctest.testmod()
| 352
|
def bubble_sort(list_data: list , length: int = 0 ) -> list:
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
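# The variant above is recursive: each pass bubbles the current maximum to
# the end, then recurses on the shorter prefix, and the `swapped` flag stops
# early once a pass makes no exchange. Quick checks:
assert bubble_sort([0, 5, 2, 3, 2]) == [0, 2, 2, 3, 5]
assert bubble_sort([]) == []
assert bubble_sort([-2, -45, -5]) == [-45, -5, -2]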
if __name__ == "__main__":
import doctest
doctest.testmod()
| 352
| 1
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("""IGNORE_RESULT""")
OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker):
    def check_output(self , want , got , optionflags ):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
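# With the custom checker installed above, a doctest can opt out of output
# comparison with the IGNORE_RESULT flag: the line still has to run without
# raising, but whatever it prints is accepted. An illustrative doctest (not
# collected from a real module):
#
#     >>> import random
#     >>> random.random()  # doctest: +IGNORE_RESULT
#     0.123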
| 717
|
| 431
| 0
|
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int]="attention" ):
'''simple docstring'''
UpperCAmelCase_ = params[F"""{prefix}/layers_{i}/{layer_name}/key/kernel"""]
UpperCAmelCase_ = params[F"""{prefix}/layers_{i}/{layer_name}/out/kernel"""]
UpperCAmelCase_ = params[F"""{prefix}/layers_{i}/{layer_name}/query/kernel"""]
UpperCAmelCase_ = params[F"""{prefix}/layers_{i}/{layer_name}/value/kernel"""]
return k, o, q, v
def __lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : str=False ):
'''simple docstring'''
if split_mlp_wi:
UpperCAmelCase_ = params[F"""{prefix}/layers_{i}/mlp/wi_0/kernel"""]
UpperCAmelCase_ = params[F"""{prefix}/layers_{i}/mlp/wi_1/kernel"""]
UpperCAmelCase_ = (wi_a, wi_a)
else:
UpperCAmelCase_ = params[F"""{prefix}/layers_{i}/mlp/wi/kernel"""]
UpperCAmelCase_ = params[F"""{prefix}/layers_{i}/mlp/wo/kernel"""]
return wi, wo
def __lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict ):
'''simple docstring'''
return params[F"""{prefix}/layers_{i}/{layer_name}/scale"""]
def __lowerCamelCase ( _UpperCamelCase : dict , *, _UpperCamelCase : int , _UpperCamelCase : bool ):
'''simple docstring'''
UpperCAmelCase_ = traverse_util.flatten_dict(variables['''target'''] )
UpperCAmelCase_ = {'''/'''.join(_UpperCamelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
UpperCAmelCase_ = '''encoder/layers_0/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , _UpperCamelCase )
UpperCAmelCase_ = collections.OrderedDict()
# Shared embeddings.
UpperCAmelCase_ = old['''token_embedder/embedding''']
# Encoder.
for i in range(_UpperCamelCase ):
# Block i, layer 0 (Self Attention).
UpperCAmelCase_ = tax_layer_norm_lookup(_UpperCamelCase , _UpperCamelCase , '''encoder''' , '''pre_attention_layer_norm''' )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = tax_attention_lookup(_UpperCamelCase , _UpperCamelCase , '''encoder''' , '''attention''' )
UpperCAmelCase_ = layer_norm
UpperCAmelCase_ = k.T
UpperCAmelCase_ = o.T
UpperCAmelCase_ = q.T
UpperCAmelCase_ = v.T
# Block i, layer 1 (MLP).
UpperCAmelCase_ = tax_layer_norm_lookup(_UpperCamelCase , _UpperCamelCase , '''encoder''' , '''pre_mlp_layer_norm''' )
UpperCAmelCase_ , UpperCAmelCase_ = tax_mlp_lookup(_UpperCamelCase , _UpperCamelCase , '''encoder''' , _UpperCamelCase )
UpperCAmelCase_ = layer_norm
if split_mlp_wi:
UpperCAmelCase_ = wi[0].T
UpperCAmelCase_ = wi[1].T
else:
UpperCAmelCase_ = wi.T
UpperCAmelCase_ = wo.T
UpperCAmelCase_ = old[
'''encoder/relpos_bias/rel_embedding'''
].T
UpperCAmelCase_ = old['''encoder/encoder_norm/scale''']
if not is_encoder_only:
# Decoder.
for i in range(_UpperCamelCase ):
# Block i, layer 0 (Self Attention).
UpperCAmelCase_ = tax_layer_norm_lookup(_UpperCamelCase , _UpperCamelCase , '''decoder''' , '''pre_self_attention_layer_norm''' )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = tax_attention_lookup(_UpperCamelCase , _UpperCamelCase , '''decoder''' , '''self_attention''' )
UpperCAmelCase_ = layer_norm
UpperCAmelCase_ = k.T
UpperCAmelCase_ = o.T
UpperCAmelCase_ = q.T
UpperCAmelCase_ = v.T
# Block i, layer 1 (Cross Attention).
UpperCAmelCase_ = tax_layer_norm_lookup(_UpperCamelCase , _UpperCamelCase , '''decoder''' , '''pre_cross_attention_layer_norm''' )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = tax_attention_lookup(_UpperCamelCase , _UpperCamelCase , '''decoder''' , '''encoder_decoder_attention''' )
UpperCAmelCase_ = layer_norm
UpperCAmelCase_ = k.T
UpperCAmelCase_ = o.T
UpperCAmelCase_ = q.T
UpperCAmelCase_ = v.T
# Block i, layer 2 (MLP).
UpperCAmelCase_ = tax_layer_norm_lookup(_UpperCamelCase , _UpperCamelCase , '''decoder''' , '''pre_mlp_layer_norm''' )
UpperCAmelCase_ , UpperCAmelCase_ = tax_mlp_lookup(_UpperCamelCase , _UpperCamelCase , '''decoder''' , _UpperCamelCase )
UpperCAmelCase_ = layer_norm
if split_mlp_wi:
UpperCAmelCase_ = wi[0].T
UpperCAmelCase_ = wi[1].T
else:
UpperCAmelCase_ = wi.T
UpperCAmelCase_ = wo.T
UpperCAmelCase_ = old['''decoder/decoder_norm/scale''']
UpperCAmelCase_ = old[
'''decoder/relpos_bias/rel_embedding'''
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
UpperCAmelCase_ = old['''decoder/logits_dense/kernel'''].T
return new
def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : bool ):
'''simple docstring'''
UpperCAmelCase_ = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
UpperCAmelCase_ = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
UpperCAmelCase_ = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
UpperCAmelCase_ = state_dict['''shared.weight''']
return state_dict
def __lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Optional[int] ):
'''simple docstring'''
    UpperCAmelCase_ = checkpoints.load_t5x_checkpoint(_UpperCamelCase )
UpperCAmelCase_ = convert_tax_to_pytorch(_UpperCamelCase , num_layers=config.num_layers , is_encoder_only=_UpperCamelCase )
UpperCAmelCase_ = make_state_dict(_UpperCamelCase , _UpperCamelCase )
model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
def __lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : bool = False ):
'''simple docstring'''
    UpperCAmelCase_ = T5Config.from_json_file(_UpperCamelCase )
print(F"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        UpperCAmelCase_ = T5EncoderModel(_UpperCamelCase )
    else:
        UpperCAmelCase_ = T5ForConditionalGeneration(_UpperCamelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(_UpperCamelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(_UpperCamelCase )
print('''Done''' )
if __name__ == "__main__":
lowercase__ : Union[str, Any] = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
lowercase__ : Optional[Any] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
    args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
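# Note the pervasive `.T` in the conversion above: T5X/Flax stores dense
# kernels as (in_features, out_features), while PyTorch nn.Linear keeps its
# weight as (out_features, in_features), so every kernel is transposed on the
# way in. A tiny illustration of the layout difference (`_demo_kernel_transpose`
# is a hypothetical helper with toy sizes):
def _demo_kernel_transpose():
    flax_kernel = torch.zeros(512 , 2048 )  # (in_features, out_features) in T5X
    torch_weight = flax_kernel.T  # (out_features, in_features) for nn.Linear
    assert torch_weight.shape == (2048, 512)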
| 390
|
'''simple docstring'''
from math import loga
def __lowerCamelCase ( _UpperCamelCase : int ):
'''simple docstring'''
if a < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
raise TypeError('''Input value must be a \'int\' type''' )
return 0 if (a == 0) else int(loga(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 390
| 1
|
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_A : Union[str, Any] = """\
@inproceedings{snover-etal-2006-study,
title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",
author = \"Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John\",
booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",
month = aug # \" 8-12\",
year = \"2006\",
address = \"Cambridge, Massachusetts, USA\",
publisher = \"Association for Machine Translation in the Americas\",
url = \"https://aclanthology.org/2006.amta-papers.25\",
pages = \"223--231\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_A : Union[str, Any] = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The lengths of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""
_A : Optional[Any] = """
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
'score' (float): TER score (num_edits / sum_ref_lengths * 100)
'num_edits' (int): The cumulative number of edits
'ref_length' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}
Example 2:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}
Example 3:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}
Example 4:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}
Example 5:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
    def _compute(self, predictions, references, normalized: bool = False, ignore_punct: bool = False, support_zh_ja_chars: bool = False, case_sensitive: bool = False,):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized, no_punct=ignore_punct, asian_support=support_zh_ja_chars, case_sensitive=case_sensitive,)
        output = sb_ter.corpus_score(predictions, transformed_references)
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 700
|
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
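# Worked example (from the Project Euler 74 statement, not this file):
#   digit_factorial_sum(145) == 1! + 4! + 5! == 1 + 24 + 120 == 145,
# so 145 maps to itself and forms a chain of length 1.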
def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0")
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution()}")
| 518
| 0
|
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.17.0.dev0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."})
    dataset_config_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},)
    max_seq_length: int = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },)
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."})
    pad_to_max_length: bool = field(
        default=False, metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },)
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },)
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },)
    max_predict_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },)
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."})
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."})
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},)
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},)
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},)
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },)
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
_lowercase: str = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f'''load a local file for {key}: {data_files[key]}''' )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
else:
# Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
_lowercase: Union[str, Any] = raw_datasets["train"].features["label"].names
_lowercase: Tuple = len(_SCREAMING_SNAKE_CASE )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,)
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=True,)
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,)
# Padding strategy
if data_args.pad_to_max_length:
_lowercase: Optional[Any] = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
_lowercase: List[str] = {"Refused": 0, "Entailed": 1}
_lowercase: Optional[int] = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
return result
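    # Illustrative (not from this file): a `table_text` like "col1#col2\nv1#v2"
    # becomes a one-row pandas DataFrame with columns ["col1", "col2"], which
    # the TAPEX tokenizer then linearizes together with the statement.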
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset",)
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
_lowercase: str = raw_datasets["train"]
if data_args.max_train_samples is not None:
_lowercase: List[Any] = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
_lowercase: Union[str, Any] = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
_lowercase: Tuple = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
_lowercase: Tuple = raw_datasets["test"]
if data_args.max_predict_samples is not None:
_lowercase: Dict = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator,)
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
_lowercase: int = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
if training_args.push_to_hub:
trainer.push_to_hub(**_SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**_SCREAMING_SNAKE_CASE )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 353
|
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class Test(unittest.TestCase):
    '''simple docstring'''
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
| 211
| 0
|
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
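# Illustrative trace (not from this file) for arr = [3, 1, 2]:
#   even round: neighbors (0, 1) compare-exchange -> [1, 3, 2]
#   odd round:  neighbors (1, 2) compare-exchange -> [1, 2, 3]
# Each compare-exchange is realized by a send/recv pair between the two
# neighboring processes, guarded by `process_lock`.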
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),))
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),))
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process, args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),))
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)
if __name__ == "__main__":
main()
| 708
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3,)
        return model
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,)
        return CLIPTextModel(config)
    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            'attention_bias': True,
            'cross_attention_dim': 32,
            'attention_head_dim': height * width,
            'num_attention_heads': 1,
            'num_vector_embeds': self.num_embed,
            'num_embeds_ada_norm': self.num_embeds_ada_norm,
            'norm_num_groups': 32,
            'sample_size': width,
            'activation_fn': 'geglu-approximate',
        }
        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
        device = 'cpu'
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'teddy bear playing in the pool'
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type='np')
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type='np', return_dict=False, num_inference_steps=2)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = 'cpu'
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'teddy bear playing in the pool'
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type='np')
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type='np', return_dict=False, num_inference_steps=2)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy')
        pipeline = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            'teddy bear playing in the pool', num_images_per_prompt=1, generator=generator, output_type='np',)
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 6
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class SwitchTransformersConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'switch_transformers'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
    def __init__(
        self,
        vocab_size=32_128,
        d_model=768,
        d_kv=64,
        d_ff=2_048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split('-' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'' )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs, )
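# A minimal usage sketch (assuming the config class above; values illustrative):
#
#   config = SwitchTransformersConfig(num_layers=6, num_sparse_encoder_layers=3)
#   assert config.encoder_sparse_step == 2  # every 2nd encoder layer is sparse MoE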
| 265
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    """simple docstring"""
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use YolosImageProcessor instead.''', FutureWarning,)
        super().__init__(*args, **kwargs)
| 41
| 0
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('''mps''' ):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != '''cuda''', reason='''float16 requires CUDA''')
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,)
| 705
|
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Calculates the resonant frequency of an LC circuit: f = 1 / (2*pi*sqrt(L*C))."""
    if inductance <= 0:
        raise ValueError('''Inductance cannot be 0 or negative''' )
    elif capacitance <= 0:
        raise ValueError('''Capacitance cannot be 0 or negative''' )
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
        )
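# Worked example, assuming the function above: with L = 10 H and C = 5 F,
# resonant_frequency(10, 5) returns ("Resonant frequency", 1 / (2 * pi * sqrt(50))),
# which is roughly 0.0225 Hz.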
if __name__ == "__main__":
import doctest
doctest.testmod()
| 219
| 0
|
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02,):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
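        # Illustrative check (not from the original file): with the defaults
        # above, (30 // 2) ** 2 == 225 patches, so seq_length == 226 once the
        # [CLS] token is counted.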
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )
        return config, pixel_values
    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size) )
    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs) )
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape )
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224" )
            outputs = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(outputs)
| 371
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    '''simple docstring'''
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size_divisor=32, do_rescale=True,):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''
    image_processing_class = GLPNImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing, "do_resize" ) )
        self.assertTrue(hasattr(image_processing, "size_divisor" ) )
        self.assertTrue(hasattr(image_processing, "resample" ) )
        self.assertTrue(hasattr(image_processing, "do_rescale" ) )
    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 211
| 0
|
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def __lowerCamelCase ( ) -> List[Any]:
plt.scatter(__lowerCAmelCase , __lowerCAmelCase , color="""red""" )
plt.plot(__lowerCAmelCase , pol_reg.predict(poly_reg.fit_transform(__lowerCAmelCase ) ) , color="""blue""" )
plt.title("""Truth or Bluff (Linear Regression)""" )
plt.xlabel("""Position level""" )
plt.ylabel("""Salary""" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
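    # Illustrative extension (not in the original script): the train/test split
    # above is created but never used, so a separate model fitted only on the
    # training rows can be scored on the held-out rows. The names below are
    # local assumptions.
    held_out_reg = LinearRegression()
    held_out_reg.fit(poly_reg.fit_transform(X_train), y_train)
    print(held_out_reg.score(poly_reg.transform(X_test), y_test))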
| 517
|
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # fill in each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
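# Illustrative check (not part of the original script): an even ksize is bumped
# to the next odd value, so requesting 10 yields an 11x11 kernel.
# >>> gabor_filter_kernel(10, 8, 0, 10, 0, 0).shape
# (11, 11)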
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 11x11 mask and 6 directions", out)
    waitKey(0)
| 517
| 1
|
'''simple docstring'''
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Manhattan distance between the current position and the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start, goal) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of valid successor nodes for the given parent."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_x, self.target.pos_y, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retraces the parent chain from the given node back to the start."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

    for elem in grid:
        print(elem)
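    # Note: f_cost here is the Manhattan heuristic alone (no accumulated path
    # cost), which makes this a greedy best-first search rather than A*; the 2s
    # printed above mark the found path, which is not guaranteed to be shortest.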
| 660
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
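# The mapping below lists each submodule together with its public names; the
# real imports only run under TYPE_CHECKING, while at runtime the _LazyModule
# at the bottom defers them until an attribute is first accessed.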
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 660
| 1
|
def validate_initial_digits(credit_card_number: str) -> bool:
    """Validate the initial digits of a given credit card number."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Perform the Luhn check on a given credit card number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e. greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Validate the given credit card number, printing the reason on failure."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False

    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False

    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False

    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
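    # Expected console output (derivable from the checks above):
    #   4111111111111111 is a valid credit card number.
    #   32323 is an invalid credit card number because of its length.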
| 230
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 230
| 1
|
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )

    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )

    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )

    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )

    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )

    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )

    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )

    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )

    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )

    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )

    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )

    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )

    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )

    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )

    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )

    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args


def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset


def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)

    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
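# Example invocation (illustrative; the script name and bucket paths are
# placeholders, not real resources):
#   python run_mlm.py --tpu_name local --train_dataset gs://my-bucket/train \
#       --eval_dataset gs://my-bucket/eval --output_dir gs://my-bucket/model --bfloat16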
| 638
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
lowercase__ = TypeVar("T")
class snake_case__ ( Generic[T] ):
"""simple docstring"""
def __init__( self : str , UpperCamelCase__ : list[T] , UpperCamelCase__ : Callable[[T, T], T] ) -> None:
"""simple docstring"""
snake_case : Any | T = None
snake_case : int = len(UpperCamelCase__ )
snake_case : list[T] = [any_type for _ in range(self.N )] + arr
snake_case : Optional[int] = fnc
self.build()
def lowerCAmelCase ( self : Optional[Any] ) -> None:
"""simple docstring"""
for p in range(self.N - 1 , 0 , -1 ):
snake_case : List[Any] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowerCAmelCase ( self : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : T ) -> None:
"""simple docstring"""
p += self.N
snake_case : int = v
while p > 1:
snake_case : List[Any] = p // 2
snake_case : List[Any] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> T | None: # noqa: E741
"""simple docstring"""
snake_case ,snake_case : Union[str, Any] = l + self.N, r + self.N
snake_case : T | None = None
while l <= r:
if l % 2 == 1:
snake_case : str = self.st[l] if res is None else self.fn(UpperCamelCase__ , self.st[l] )
if r % 2 == 0:
snake_case : Tuple = self.st[r] if res is None else self.fn(UpperCamelCase__ , self.st[r] )
snake_case ,snake_case : Dict = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
lowercase__ = [1, 1_0, -2, 9, -3, 8, 4, -7, 5, 6, 1_1, -1_2]
lowercase__ = {
0: 7,
1: 2,
2: 6,
3: -1_4,
4: 5,
5: 4,
6: 7,
7: -1_0,
8: 9,
9: 1_0,
1_0: 1_2,
1_1: 1,
}
lowercase__ = SegmentTree(test_array, min)
lowercase__ = SegmentTree(test_array, max)
lowercase__ = SegmentTree(test_array, lambda a, b: a + b)
def _UpperCamelCase ( ) -> None:
'''simple docstring'''
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
for j in range(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ):
snake_case : int = reduce(SCREAMING_SNAKE_CASE__ , test_array[i : j + 1] )
snake_case : List[str] = reduce(SCREAMING_SNAKE_CASE__ , test_array[i : j + 1] )
snake_case : Tuple = reduce(lambda SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert max_range == max_segment_tree.query(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert sum_range == sum_segment_tree.query(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
test_all_segments()
for index, value in test_updates.items():
lowercase__ = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
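    # Quick illustrative check (not in the original): after the updates above,
    # a whole-array query must agree with Python's built-in aggregation.
    assert min_segment_tree.query(0, len(test_array) - 1) == min(test_array)
    assert sum_segment_tree.query(0, len(test_array) - 1) == sum(test_array)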
| 638
| 1
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
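# Minimal usage sketch (illustrative; the feature and column names here are
# assumptions, not part of the original module):
#
#   features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
#   task = TextClassification(text_column="text", label_column="label")
#   task = task.align_with_features(features)
#   task.column_mapping  # {"text": "text", "label": "labels"}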
| 710
|
'''simple docstring'''
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
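# Worked example (illustrative): for integer = 3 the candidate in solution()
# below is (3**2 - 1) / 4 = 2, and check_partition_perfect(2) computes
# log2(sqrt(4 * 2 + 1) / 2 + 1 / 2) = log2(2) = 1, an integer, so 2 counts as
# a perfect partition.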
def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''')
| 11
| 0
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether cp is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):
        return True
    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)  # noqa: E741
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )

    args = parser.parse_args()
    main(args)
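# Example invocation (illustrative; the paths are the placeholder defaults above):
#   python prepare_chinese_ref.py --file_name=./resources/chinese-demo.txt \
#       --ltp=./resources/ltp --bert=./resources/robert --save_path=./resources/ref.txt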
| 607
|
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class Conversation:
    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
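# Minimal usage sketch (illustrative; assumes a conversational checkpoint such
# as "microsoft/DialoGPT-small" is available):
#
#   from transformers import pipeline
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#   conversation = Conversation("Hi, how are you?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])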
| 607
| 1
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions

                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)


# We will verify our results on a video of eating spaghetti
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 283
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 283
| 1
|
from math import isclose, sqrt
def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    # normal_gradient = gradient of the normal at the point of incidence
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
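    # Geometry note (illustrative): the beam enters through the gap at
    # (0.0, 10.1) on the ellipse 4x^2 + y^2 = 100, and the loop in solution()
    # stops once a bounce lands back in the gap, i.e. |x| <= 0.01 with y > 0.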
| 141
|
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def main() -> None:
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
main()
| 433
| 0
|
def excel_title_to_column(column_title: str) -> int:
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
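    # Illustrative checks (not part of the original): "A" -> 1 and "AB" -> 28,
    # since "AB" = 1 * 26**1 + 2 * 26**0.
    assert excel_title_to_column("A") == 1
    assert excel_title_to_column("AB") == 28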
| 715
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_roformer'''] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
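# For context: _LazyModule defers the heavy framework imports above until an
# attribute is first accessed. A minimal illustrative reimplementation of the
# pattern (not the actual transformers implementation):
import importlib
from types import ModuleType
class _LazyModuleSketch(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }
    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ only runs once per symbol
        return value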
| 455
| 0
|
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """simple docstring"""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
| 488
|
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
'''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self) -> tuple:
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self) -> tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self) -> tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
'''simple docstring'''
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self) -> None:
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)
@slow
    def test_model_from_pretrained(self) -> None:
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@slow
    def test_inference_masked_lm(self) -> None:
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
# compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_no_head(self) -> None:
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 488
| 1
|
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(model, model_file):
try:
with open(lowerCamelCase , """rb""" ) as flax_state_f:
__magic_name__ : Dict =from_bytes(lowerCamelCase , flax_state_f.read() )
except UnpicklingError as e:
try:
            with open(model_file) as f:
if f.read().startswith("""version""" ):
raise OSError(
"""You seem to have cloned a repository without having git-lfs installed. Please"""
""" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
""" folder you cloned.""" )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F"Unable to convert {model_file} to Flax deserializable object. " )
    return load_flax_weights_in_pytorch_model(model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state)
    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
flax_key_tuple_string.replace("""_0""" , """.0""" )
.replace("""_1""" , """.1""" )
.replace("""_2""" , """.2""" )
.replace("""_3""" , """.3""" )
.replace("""_4""" , """.4""" )
.replace("""_5""" , """.5""" )
.replace("""_6""" , """.6""" )
.replace("""_7""" , """.7""" )
.replace("""_8""" , """.8""" )
.replace("""_9""" , """.9""" )
)
        flax_key = ".".join(flax_key_tuple_array)
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
else:
# weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)
    pt_model.load_state_dict(pt_model_dict)
# re-transform missing_keys to list
    missing_keys = list(missing_keys)
    if len(unexpected_keys) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
    if len(missing_keys) > 0:
logger.warning(
F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
""" use it for predictions and inference.""" )
return pt_model
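# Minimal sketch of the per-parameter key/shape rules applied above
# (assumes standard Flax Dense/Conv layouts; names are illustrative):
def flax_param_to_torch(flax_key: str, flax_tensor):
    parts = flax_key.split(".")
    if parts[-1] == "kernel" and flax_tensor.ndim == 4:
        # Conv kernel: Flax stores (H, W, in, out); PyTorch expects (out, in, H, W).
        parts = parts[:-1] + ["weight"]
        flax_tensor = np.transpose(flax_tensor, (3, 2, 0, 1))
    elif parts[-1] == "kernel":
        # Dense kernel: Flax stores (in, out); PyTorch expects (out, in).
        parts = parts[:-1] + ["weight"]
        flax_tensor = flax_tensor.T
    elif parts[-1] == "scale":
        # LayerNorm "scale" is called "weight" in PyTorch.
        parts = parts[:-1] + ["weight"]
    return ".".join(parts), torch.from_numpy(np.asarray(flax_tensor))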
| 367
|
UpperCAmelCase_ : int = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
UpperCAmelCase_ : Optional[Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
UpperCAmelCase_ : int = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
UpperCAmelCase_ : Dict = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
UpperCAmelCase_ : int = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
UpperCAmelCase_ : str = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
UpperCAmelCase_ : Any = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
UpperCAmelCase_ : Any = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 367
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"moussaKam/mbarthez": 1_024,
"moussaKam/barthez": 1_024,
"moussaKam/barthez-orangesum-title": 1_024,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None:
        # The mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
    def vocab_size(self) -> int:
        return len(self.sp_model)
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder )
return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
return spm_id if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
return out_string.strip()
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state['sp_model'] = None
return state
    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
return (out_vocab_file,)
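# Illustrative layouts produced by the helpers above (RoBERTa-style convention):
#   single sequence:    <s> tokens_0 </s>
#   pair of sequences:  <s> tokens_0 </s></s> tokens_1 </s>
# e.g. given the fairseq ids above (<s>=0, </s>=2), an assumed call
# build_inputs_with_special_tokens([10, 11], [20]) yields [0, 10, 11, 2, 2, 20, 2].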
| 56
|
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001) -> int:
    """simple docstring"""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.') from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.')
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]
if __name__ == "__main__":
print(f'''{solution() = }''')
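# Quick sanity checks for the helpers above (illustrative):
assert [n for n in range(2, 30) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
assert solution(6) == 13  # the 6th prime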
| 697
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_swin'''] = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_swin'''] = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 251
|
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/m2m100_418M''': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self : Any , a_ : Any , a_ : int , a_ : int=None , a_ : Union[str, Any]=None , a_ : Optional[Any]="<s>" , a_ : Tuple="</s>" , a_ : int="</s>" , a_ : Optional[int]="<pad>" , a_ : List[Any]="<unk>" , a_ : Tuple="m2m100" , a_ : Optional[Dict[str, Any]] = None , a_ : Optional[Any]=8 , **a_ : Union[str, Any] , ) -> None:
'''simple docstring'''
a__ : int = {} if sp_model_kwargs is None else sp_model_kwargs
a__ : List[str] = language_codes
a__ : int = FAIRSEQ_LANGUAGE_CODES[language_codes]
a__ : Tuple = {lang_code: F"__{lang_code}__" for lang_code in fairseq_language_code}
a__ : Optional[Any] = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(a_ )
for lang_code in fairseq_language_code
if self.get_lang_token(a_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=a_ , tgt_lang=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , unk_token=a_ , pad_token=a_ , language_codes=a_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=a_ , **a_ , )
a__ : List[str] = vocab_file
a__ : Optional[int] = load_json(a_ )
a__ : List[Any] = {v: k for k, v in self.encoder.items()}
a__ : List[Any] = spm_file
a__ : Any = load_spm(a_ , self.sp_model_kwargs )
a__ : Tuple = len(self.encoder )
a__ : Any = {
self.get_lang_token(a_ ): self.encoder_size + i for i, lang_code in enumerate(a_ )
}
a__ : List[str] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(a_ )}
a__ : Any = {v: k for k, v in self.lang_token_to_id.items()}
a__ : Union[str, Any] = src_lang if src_lang is not None else "en"
a__ : Union[str, Any] = tgt_lang
a__ : List[Any] = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
a__ : Optional[int] = num_madeup_words
@property
    def vocab_size(self) -> int:
'''simple docstring'''
return len(self.encoder ) + len(self.lang_token_to_id )
@property
    def src_lang(self) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        '''simple docstring'''
        self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCAmelCase ( self : Tuple , a_ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(a_ , out_type=a_ )
def UpperCAmelCase ( self : List[Any] , a_ : Optional[int] ) -> Any:
'''simple docstring'''
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(a_ , self.encoder[self.unk_token] )
def UpperCAmelCase ( self : str , a_ : int ) -> str:
'''simple docstring'''
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(a_ , self.unk_token )
def UpperCAmelCase ( self : Dict , a_ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
a__ : Optional[Any] = []
a__ : Optional[int] = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(a_ ) + token
a__ : List[str] = []
else:
current_sub_tokens.append(a_ )
out_string += self.sp_model.decode(a_ )
return out_string.strip()
def UpperCAmelCase ( self : Union[str, Any] , a_ : List[int] , a_ : Optional[List[int]] = None , a_ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
a__ : Any = [1] * len(self.prefix_tokens )
a__ : Optional[int] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(a_ )) + suffix_ones
return prefix_ones + ([0] * len(a_ )) + ([0] * len(a_ )) + suffix_ones
def UpperCAmelCase ( self : Union[str, Any] , a_ : List[int] , a_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCAmelCase ( self : str ) -> Dict:
'''simple docstring'''
a__ : int = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ) -> Dict:
'''simple docstring'''
a__ : Tuple = self.__dict__.copy()
a__ : Optional[int] = None
return state
def __setstate__( self : List[str] , a_ : Dict ) -> None:
'''simple docstring'''
a__ : Tuple = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a__ : List[Any] = {}
a__ : Optional[int] = load_spm(self.spm_file , self.sp_model_kwargs )
def UpperCAmelCase ( self : List[Any] , a_ : str , a_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
a__ : Dict = Path(a_ )
if not save_dir.is_dir():
raise OSError(F"{save_directory} should be a directory" )
a__ : Union[str, Any] = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
a__ : Tuple = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , a_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(a_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , a_ )
elif not os.path.isfile(self.spm_file ):
with open(a_ , "wb" ) as fi:
a__ : List[Any] = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (str(a_ ), str(a_ ))
def UpperCAmelCase ( self : Any , a_ : List[str] , a_ : str = "en" , a_ : Optional[List[str]] = None , a_ : str = "ro" , **a_ : Dict , ) -> BatchEncoding:
'''simple docstring'''
a__ : str = src_lang
a__ : Any = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(a_ , a_ , **a_ )
def UpperCAmelCase ( self : Optional[Any] , a_ : Dict , a_ : Optional[str] , a_ : Optional[str] , **a_ : Tuple ) -> str:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
a__ : List[Any] = src_lang
a__ : Optional[int] = self(a_ , add_special_tokens=a_ , **a_ )
a__ : Any = self.get_lang_id(a_ )
a__ : int = tgt_lang_id
return inputs
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
'''simple docstring'''
self.set_src_lang_special_tokens(self.src_lang )
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        '''simple docstring'''
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        '''simple docstring'''
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def get_lang_token(self, lang: str) -> str:
        '''simple docstring'''
        return self.lang_code_to_token[lang]
    def get_lang_id(self, lang: str) -> int:
        '''simple docstring'''
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    '''simple docstring'''
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str) -> Union[Dict, List]:
    '''simple docstring'''
    with open(path, "r") as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    '''simple docstring'''
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
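# Illustrative sketch of the language-token handling implemented by
# set_src_lang_special_tokens above: encoded inputs become
#   [__src_lang__] tokens </s>
# (the language token is prepended, eos appended).
def wrap_with_lang_token(token_ids, lang_token_id: int, eos_token_id: int):
    return [lang_token_id] + token_ids + [eos_token_id]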
| 251
| 1
|
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
class DownloadCommand(BaseTransformersCLICommand):
@staticmethod
    def register_subcommand(parser: ArgumentParser):
'''simple docstring'''
        download_parser = parser.add_parser('''download''' )
        download_parser.add_argument(
            '''--cache-dir''' , type=str , default=None , help='''Path to location to store the models''' )
download_parser.add_argument(
'''--force''' , action='''store_true''' , help='''Force the model to be download even if already in cache-dir''' )
download_parser.add_argument(
'''--trust-remote-code''' , action='''store_true''' , help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''' , )
        download_parser.add_argument('''model''' , type=str , help='''Name of the model to download''' )
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        '''simple docstring'''
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run(self):
'''simple docstring'''
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 677
|
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase (a_ :int , a_ :Union[str, Any] , a_ :List[Any]) -> List[str]:
return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def lowerCamelCase (a_ :Optional[Any] , a_ :Optional[int] , a_ :str , a_ :Any="attention") -> Optional[int]:
lowercase :Tuple = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :])
lowercase :int = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2])
lowercase :str = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :])
lowercase :Any = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2])
lowercase :int = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :])
lowercase :List[str] = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2])
lowercase :List[Any] = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :])
lowercase :Optional[int] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2])
return k, o, q, v
def lowerCamelCase (a_ :Any , a_ :Union[str, Any] , a_ :Union[str, Any] , a_ :Union[str, Any]=False) -> List[Any]:
if split_mlp_wi:
lowercase :List[Any] = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
lowercase :Optional[int] = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
lowercase :Dict = (wi_a, wi_a)
else:
lowercase :Optional[Any] = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
lowercase :Union[str, Any] = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
return wi, wo
def lowerCamelCase (a_ :Any , a_ :Optional[Any] , a_ :Optional[Any] , a_ :Union[str, Any]) -> Optional[Any]:
return params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def lowerCamelCase (a_ :dict , *, a_ :int , a_ :bool , a_ :bool = False) -> int:
lowercase :Dict = traverse_util.flatten_dict(variables['''target'''])
lowercase :Optional[Any] = {'''/'''.join(a_): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowercase :str = '''encoder/encoder/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , a_)
lowercase :str = collections.OrderedDict()
# Shared embeddings.
lowercase :int = old['''token_embedder/embedding''']
# Encoder.
for i in range(a_):
# Block i, layer 0 (Self Attention).
lowercase :Union[str, Any] = tax_layer_norm_lookup(a_ , a_ , '''encoder''' , '''pre_attention_layer_norm''')
lowercase , lowercase , lowercase , lowercase :Tuple = tax_attention_lookup(a_ , a_ , '''encoder''' , '''attention''')
lowercase :Dict = layer_norm
lowercase :Dict = k.T
lowercase :Union[str, Any] = o.T
lowercase :List[Any] = q.T
lowercase :int = v.T
# Block i, layer 1 (MLP).
lowercase :Optional[int] = tax_layer_norm_lookup(a_ , a_ , '''encoder''' , '''pre_mlp_layer_norm''')
lowercase , lowercase :str = tax_mlp_lookup(a_ , a_ , '''encoder''' , a_)
lowercase :int = layer_norm
if split_mlp_wi:
lowercase :Tuple = wi[0].T
lowercase :Tuple = wi[1].T
else:
lowercase :int = wi.T
lowercase :Tuple = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase :Dict = tax_relpos_bias_lookup(
a_ , a_ , '''encoder''').T
lowercase :str = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
lowercase :str = tax_relpos_bias_lookup(
a_ , 0 , '''encoder''').T
lowercase :List[Any] = tax_relpos_bias_lookup(
a_ , 0 , '''decoder''').T
if not is_encoder_only:
# Decoder.
for i in range(a_):
# Block i, layer 0 (Self Attention).
lowercase :Any = tax_layer_norm_lookup(a_ , a_ , '''decoder''' , '''pre_self_attention_layer_norm''')
lowercase , lowercase , lowercase , lowercase :str = tax_attention_lookup(a_ , a_ , '''decoder''' , '''self_attention''')
lowercase :List[str] = layer_norm
lowercase :Dict = k.T
lowercase :List[Any] = o.T
lowercase :List[Any] = q.T
lowercase :Any = v.T
# Block i, layer 1 (Cross Attention).
lowercase :Tuple = tax_layer_norm_lookup(a_ , a_ , '''decoder''' , '''pre_cross_attention_layer_norm''')
lowercase , lowercase , lowercase , lowercase :int = tax_attention_lookup(a_ , a_ , '''decoder''' , '''encoder_decoder_attention''')
lowercase :int = layer_norm
lowercase :Dict = k.T
lowercase :int = o.T
lowercase :List[Any] = q.T
lowercase :Tuple = v.T
# Block i, layer 2 (MLP).
lowercase :Any = tax_layer_norm_lookup(a_ , a_ , '''decoder''' , '''pre_mlp_layer_norm''')
lowercase , lowercase :Tuple = tax_mlp_lookup(a_ , a_ , '''decoder''' , a_)
lowercase :Any = layer_norm
if split_mlp_wi:
lowercase :int = wi[0].T
lowercase :Union[str, Any] = wi[1].T
else:
lowercase :int = wi.T
lowercase :List[Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase :Union[str, Any] = tax_relpos_bias_lookup(a_ , a_ , '''decoder''').T
lowercase :Union[str, Any] = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowercase :int = old['''decoder/logits_dense/kernel'''].T
return new
def lowerCamelCase (a_ :Dict , a_ :bool) -> Tuple:
lowercase :str = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowercase :Any = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowercase :Optional[Any] = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''')
lowercase :Optional[int] = state_dict['''shared.weight''']
return state_dict
def lowerCamelCase (a_ :List[str] , a_ :List[str] , a_ :Tuple , a_ :Optional[int] , a_ :List[str]) -> List[str]:
lowercase :Optional[Any] = checkpoints.load_tax_checkpoint(a_)
lowercase :Optional[int] = convert_tax_to_pytorch(
a_ , num_layers=config.num_layers , is_encoder_only=a_ , scalable_attention=a_)
lowercase :Union[str, Any] = make_state_dict(a_ , a_)
model.load_state_dict(a_ , strict=a_)
def lowerCamelCase (a_ :str , a_ :Optional[int] , a_ :Any , a_ :bool = False , a_ :bool = False , ) -> Tuple:
lowercase :Optional[int] = MTaConfig.from_json_file(a_)
print(F"""Building PyTorch model from configuration: {config}""")
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowercase :Union[str, Any] = UMTaEncoderModel(a_)
else:
lowercase :int = UMTaForConditionalGeneration(a_)
# Load weights from tf checkpoint
load_tax_weights_in_ta(a_ , a_ , a_ , a_ , a_)
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""")
model.save_pretrained(a_)
# Verify that we can load the checkpoint.
model.from_pretrained(a_)
print('''Done''')
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
UpperCAmelCase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
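# Minimal sketch of the attention-weight reshape used by the lookup helpers
# above: the checkpoint appears to store per-layer attention kernels with a
# stacked layer axis, sliced per layer and fused over heads (illustrative):
def fuse_heads(kernel, i):
    k = np.ascontiguousarray(kernel[:, i, :, :])  # -> (d_model, heads, d_head)
    return k.reshape(k.shape[0], k.shape[1] * k.shape[2])  # -> (d_model, heads * d_head)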
| 677
| 1
|
def perfect(number: int) -> bool:
    '''simple docstring'''
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
UpperCAmelCase__ = int(input("Enter number: ").strip())
print(F"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
| 362
|
import math
def main() -> None:
    '''simple docstring'''
    message = input('Enter message: ')
    key = int(input(f'Enter key [2-{len(message) - 1}]: '))
    mode = input('Encryption/Decryption [e/d]: ')
    if mode.lower().startswith('e'):
        text = encrypt_message(key, message)
    elif mode.lower().startswith('d'):
        text = decrypt_message(key, message)
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f'Output:\n{text + "|"}')
def encrypt_message(key: int, message: str) -> str:
    '''simple docstring'''
    cipher_text = [''] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return ''.join(cipher_text)
def decrypt_message(key: int, message: str) -> str:
    '''simple docstring'''
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [''] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return ''.join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
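    # Round-trip sanity check (illustrative):
    _msg = 'Common sense is not so common.'
    assert decrypt_message(8, encrypt_message(8, _msg)) == _msg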
| 362
| 1
|
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_small_integration_test(self):
        model = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 661
|
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    '''simple docstring'''
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]):
        self.vertices = vertices
        self.edges = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }
    def add_edge(self, edge: EdgeT, weight: int):
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight
    def prims_algorithm(self) -> 'Graph':
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    script_dir = os.path.abspath(os.path.dirname(__file__))
    data_file = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    with open(data_file) as f:
        data = f.read().strip().split('\n')
    adjacency_matrix = [line.split(',') for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])
    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(F"""{solution() = }""")
| 661
| 1
|
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_lowerCAmelCase = """bart"""
_lowerCAmelCase = True
@st.cache(allow_output_mutation=True)
def load_models():
'''simple docstring'''
if LOAD_DENSE_INDEX:
_lowerCAmelCase : str = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
_lowerCAmelCase : int = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
_lowerCAmelCase : Union[str, Any] = qar_model.eval()
else:
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = (None, None)
if MODEL_TYPE == "bart":
_lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
_lowerCAmelCase : List[str] = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
_lowerCAmelCase : List[str] = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
sas_model.load_state_dict(save_dict['model'] )
_lowerCAmelCase : List[str] = sas_model.eval()
else:
_lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = make_qa_sas_model(
model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
'''simple docstring'''
if LOAD_DENSE_INDEX:
_lowerCAmelCase : Dict = faiss.StandardGpuResources()
_lowerCAmelCase : List[str] = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
_lowerCAmelCase : Dict = np.memmap(
'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , )
_lowerCAmelCase : Tuple = faiss.IndexFlatIP(128 )
_lowerCAmelCase : List[Any] = faiss.index_cpu_to_gpu(_lowerCamelCase , 1 , _lowerCamelCase )
wikiaab_gpu_index_flat.add(_lowerCamelCase ) # TODO fix for larger GPU
else:
_lowerCAmelCase, _lowerCAmelCase : List[str] = (None, None)
_lowerCAmelCase : Tuple = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
'''simple docstring'''
_lowerCAmelCase : List[Any] = datasets.load_dataset('eli5' , name='LFQA_reddit' )
_lowerCAmelCase : Optional[int] = elia['train_eli5']
_lowerCAmelCase : Union[str, Any] = np.memmap(
'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128) )
_lowerCAmelCase : Dict = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowerCamelCase )
return (elia_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
elia_train, eli5_train_q_index = load_train_data()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=10 ):
'''simple docstring'''
_lowerCAmelCase : List[str] = embed_questions_for_retrieval([question] , _lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase, _lowerCAmelCase : Dict = eli5_train_q_index.search(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Optional[Any] = [elia_train[int(_lowerCamelCase )] for i in I[0]]
return nn_examples
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase="wiki40b" , _lowerCamelCase="dense" , _lowerCamelCase=10 ):
'''simple docstring'''
if source == "none":
_lowerCAmelCase, _lowerCAmelCase : str = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_lowerCAmelCase, _lowerCAmelCase : Dict = query_qa_dense_index(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
_lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = query_es_index(
_lowerCamelCase , _lowerCamelCase , index_name='english_wiki40b_snippets_100w' , n_results=_lowerCamelCase , )
_lowerCAmelCase : List[Any] = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
_lowerCAmelCase : Any = 'question: {} context: {}'.format(_lowerCamelCase , _lowerCamelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCamelCase : None),
} )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=64 , _lowerCamelCase=256 , _lowerCamelCase=False , _lowerCamelCase=2 , _lowerCamelCase=0.95 , _lowerCamelCase=0.8 ):
'''simple docstring'''
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = qa_sas_generate(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_answers=1 , num_beams=_lowerCamelCase , min_len=_lowerCamelCase , max_len=_lowerCamelCase , do_sample=_lowerCamelCase , temp=_lowerCamelCase , top_p=_lowerCamelCase , top_k=_lowerCamelCase , max_input_length=1024 , device='cuda:0' , )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_lowerCAmelCase = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_lowerCAmelCase = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_lowerCAmelCase = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of Wikipedia passages relevant to the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed, fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
_lowerCAmelCase = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_lowerCAmelCase = st.sidebar.checkbox("""Demo options""")
if demo_options:
_lowerCAmelCase = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_lowerCAmelCase = action_list.index(action_st)
_lowerCAmelCase = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_lowerCAmelCase = show_type == """Show full text of passages"""
else:
_lowerCAmelCase = 3
_lowerCAmelCase = True
_lowerCAmelCase = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_lowerCAmelCase = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between question and passage embeddings
trained on the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by a sequence-to-sequence model which takes the question and the retrieved documents as input.
"""
st.sidebar.markdown(retriever_info)
_lowerCAmelCase = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_lowerCAmelCase = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_lowerCAmelCase = """wiki40b"""
_lowerCAmelCase = """dense"""
_lowerCAmelCase = """beam"""
_lowerCAmelCase = 2
_lowerCAmelCase = 6_4
_lowerCAmelCase = 2_5_6
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = st.sidebar.checkbox("""Generation options""")
if generate_options:
_lowerCAmelCase = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can generate an answer with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_lowerCAmelCase = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_lowerCAmelCase = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=2_5_6, value=6_4, step=8, format=None, key=None
)
_lowerCAmelCase = st.sidebar.slider(
"""Maximum generation length""", min_value=6_4, max_value=5_1_2, value=2_5_6, step=1_6, format=None, key=None
)
if sampled == "beam":
_lowerCAmelCase = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_lowerCAmelCase = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_lowerCAmelCase = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_lowerCAmelCase = None
# start main text
_lowerCAmelCase = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
_lowerCAmelCase = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_lowerCAmelCase = st.text_input("""Enter your question here:""", """""")
else:
_lowerCAmelCase = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_lowerCAmelCase , _lowerCAmelCase = make_support(question, source=wiki_source, method="""dense""", n_results=1_0)
_lowerCAmelCase , _lowerCAmelCase = make_support(question, source=wiki_source, method="""sparse""", n_results=1_0)
_lowerCAmelCase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_lowerCAmelCase = support_list[:1_0]
_lowerCAmelCase = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
_lowerCAmelCase , _lowerCAmelCase = make_support(question, source=wiki_source, method=index_type, n_results=1_0)
if action in [0, 3]:
_lowerCAmelCase , _lowerCAmelCase = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_lowerCAmelCase = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_lowerCAmelCase = res[1].strip()
if sec_titles == "":
_lowerCAmelCase = """[{}]({})""".format(res[0], wiki_url)
else:
_lowerCAmelCase = sec_titles.split(""" & """)
_lowerCAmelCase = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
_lowerCAmelCase = find_nearest_training(question)
_lowerCAmelCase = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
_lowerCAmelCase = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_lowerCAmelCase = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 16
|
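A minimal sketch of the dense-retrieval step the demo above performs: embed the question, then run max-inner-product search over a FAISS flat index. The dimensions and the random arrays here are placeholders, not the demo's real passage embeddings.

import faiss
import numpy as np

# Build an exact inner-product index over toy passage embeddings.
dim, n_passages = 128, 1000
passage_reps = np.random.rand(n_passages, dim).astype("float32")
index = faiss.IndexFlatIP(dim)
index.add(passage_reps)

# "Embed" a question (random here) and fetch the 10 best-scoring passages.
question_rep = np.random.rand(1, dim).astype("float32")
scores, ids = index.search(question_rep, 10)
print(ids[0])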
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = set()
_lowerCAmelCase : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : Any = char
return pairs
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A="<s>" ,_A="<pad>" ,_A="</s>" ,_A="<unk>" ,_A=False ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
unk_token=_A ,bos_token=_A ,eos_token=_A ,pad_token=_A ,do_lower_case=_A ,**_A ,)
_lowerCAmelCase : List[Any] = do_lower_case
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : Optional[int] = json.load(_A )
_lowerCAmelCase : Tuple = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Tuple = None
else:
with open(_A ,encoding='utf-8' ) as merges_handle:
_lowerCAmelCase : Optional[Any] = merges_handle.read().split('\n' )[:-1]
_lowerCAmelCase : List[str] = [tuple(merge.split()[:2] ) for merge in merges]
_lowerCAmelCase : List[Any] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Union[str, Any] = {}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.decoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : str = get_pairs(_A )
if not pairs:
return token
while True:
_lowerCAmelCase : List[str] = min(_A ,key=lambda _A : self.bpe_ranks.get(_A ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = bigram
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Dict = 0
while i < len(_A ):
try:
_lowerCAmelCase : Dict = word.index(_A ,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : Optional[Any] = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Optional[Any] = tuple(_A )
_lowerCAmelCase : List[str] = new_word
if len(_A ) == 1:
break
else:
_lowerCAmelCase : List[str] = get_pairs(_A )
_lowerCAmelCase : Any = ' '.join(_A )
if word == "\n " + BPE_TOKEN_MERGES:
_lowerCAmelCase : str = '\n' + BPE_TOKEN_MERGES
if word.endswith(_A ):
_lowerCAmelCase : Dict = word.replace(_A ,'' )
_lowerCAmelCase : str = word.replace(' ' ,_A )
_lowerCAmelCase : str = word
return word
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.bpe_ranks is None:
raise ValueError(
                'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding. '
                'Make sure to provide a `merges.txt` file at instantiation to enable '
                'encoding.' )
if self.do_lower_case:
_lowerCAmelCase : Optional[Any] = text.lower()
_lowerCAmelCase : Tuple = text.split()
_lowerCAmelCase : Union[str, Any] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_A ).split(' ' ) ) )
return split_tokens
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = self.decoder.get(_A ,self.unk_token )
return result
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ' '.join(_A )
# make sure @@ tokens are concatenated
_lowerCAmelCase : int = ''.join(string.split(_A ) )
return string
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : List[Any] = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '\n' )
_lowerCAmelCase : str = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_A ,'w' ,encoding='utf-8' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
_lowerCAmelCase : Dict = token_index
writer.write(' '.join(_A ) + '\n' )
index += 1
return (vocab_file, merges_file)
| 16
| 1
|
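Before the next file, a self-contained restatement of the byte-pair merge loop that the `bpe` method above implements. The ranks table is a made-up toy, not the real Speech2Text2 merges.

def toy_bpe(word, ranks):
    # Repeatedly merge the adjacent pair with the lowest rank until no
    # mergeable pair remains, mirroring the while-loop in `bpe` above.
    symbols = list(word)
    while len(symbols) > 1:
        pairs = {(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)}
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break
        first, second = best
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(first + second)  # collapse the best pair
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols

print(toy_bpe("lower", {("l", "o"): 0, ("lo", "w"): 1}))  # ['low', 'e', 'r']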
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
lowercase_ = 5
lowercase_ = 10
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = SpeechaTextTokenizer
lowerCAmelCase_ = False
lowerCAmelCase_ = True
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
super().setUp()
__SCREAMING_SNAKE_CASE : Union[str, Any] = sp.SentencePieceProcessor()
spm_model.Load(_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = ['''<s>''', '''<pad>''', '''</s>''', '''<unk>''']
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(_A ) )]
__SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(_A , range(len(_A ) ) ) )
__SCREAMING_SNAKE_CASE : List[str] = Path(self.tmpdirname )
save_json(_A , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_A , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
__SCREAMING_SNAKE_CASE : Optional[Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = '''<pad>'''
__SCREAMING_SNAKE_CASE : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_A ) , 1001 )
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1001 )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : Dict = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [289, 50, 14, 174, 386] , )
__SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(_A , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
__SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = {'''input_ids''': [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''facebook/s2t-small-mustc-en-de-st''' , revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''' , )
@require_sentencepiece
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = '''valhalla/s2t_mustc_multilinguial_medium'''
lowerCAmelCase_ = '''C\'est trop cool'''
lowerCAmelCase_ = '''Esto es genial'''
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id['''it'''] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id['''de'''] , 11 )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size , 1_0000 )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
self.assertIn(_A , self.tokenizer.all_special_ids )
__SCREAMING_SNAKE_CASE : Any = [ES_CODE, 4, 1601, 47, 7647, 2]
__SCREAMING_SNAKE_CASE : int = self.tokenizer.decode(_A , skip_special_tokens=_A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_A )
self.assertEqual(_A , _A )
self.assertNotIn(self.tokenizer.eos_token , _A )
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = '''fr'''
__SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , _A )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = '''fr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
__SCREAMING_SNAKE_CASE : Optional[int] = '''es'''
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 74
|
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _UpperCamelCase ( UpperCamelCase__ ):
if not is_accelerate_available():
return method
UpperCAmelCase__ : Any = version.parse(accelerate.__version__ ).base_version
if version.parse(UpperCamelCase__ ) < version.parse("""0.17.0""" ):
return method
def wrapper(self , *UpperCamelCase__ , **UpperCamelCase__ ):
if hasattr(self , """_hf_hook""" ) and hasattr(self._hf_hook , """pre_forward""" ):
self._hf_hook.pre_forward(self )
return method(self , *UpperCamelCase__ , **UpperCamelCase__ )
return wrapper
| 407
| 0
|
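The accelerate snippet above gates a hook-injecting wrapper on the installed library version. A generic sketch of that pattern follows; the name `require_min_version` and the print stand-in for the pre-forward hook are illustrative, not accelerate's API.

from packaging import version

def require_min_version(installed, minimum):
    # Decorate a method only when `installed` >= `minimum`; otherwise
    # return it untouched, like the version check in the snippet above.
    def decorator(method):
        if version.parse(installed) < version.parse(minimum):
            return method
        def wrapper(self, *args, **kwargs):
            print("hook runs before", method.__name__)  # stand-in for pre_forward
            return method(self, *args, **kwargs)
        return wrapper
    return decorator

class Model:
    @require_min_version("0.18.0", "0.17.0")
    def forward(self):
        return "output"

print(Model().forward())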
'''simple docstring'''
from math import loga
def __snake_case ( a : int ):
    if a < 0:
        raise ValueError("Input value must be a positive integer" )
    elif isinstance(a , float ):
        raise TypeError("Input value must be an 'int' type" )
    return 0 if (a == 0) else int(loga(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 420
|
'''simple docstring'''
import os
from pathlib import Path
def __snake_case ( ):
from torch.utils.cpp_extension import load
    snake_case_ = Path(__file__ ).resolve().parent.parent.parent / "kernels" / "deformable_detr"
snake_case_ = [
root / filename
for filename in [
"vision.cpp",
os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ),
os.path.join("cuda" , "ms_deform_attn_cuda.cu" ),
]
]
load(
"MultiScaleDeformableAttention" , lowercase , with_cuda=lowercase , extra_include_paths=[str(lowercase )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 420
| 1
|
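Why `log2(a & -a)` in the snippet above yields the index of the lowest set bit: in two's complement, `a & -a` isolates exactly that bit as a power of two. A quick demonstration:

from math import log2

for a in (1, 2, 10, 12, 96):
    lowest = a & -a  # e.g. 12 = 0b1100 -> 0b0100 = 4
    print(a, bin(a), lowest, int(log2(lowest)))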
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict , UpperCamelCase__: Optional[Any]=None ):
SCREAMING_SNAKE_CASE__ = None
if token is not None:
SCREAMING_SNAKE_CASE__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
SCREAMING_SNAKE_CASE__ = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
SCREAMING_SNAKE_CASE__ = requests.get(UpperCamelCase__ , headers=UpperCamelCase__ ).json()
SCREAMING_SNAKE_CASE__ = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
SCREAMING_SNAKE_CASE__ = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = requests.get(url + f'''&page={i + 2}''' , headers=UpperCamelCase__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict , UpperCamelCase__: str=None ):
SCREAMING_SNAKE_CASE__ = None
if token is not None:
SCREAMING_SNAKE_CASE__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
SCREAMING_SNAKE_CASE__ = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'''
SCREAMING_SNAKE_CASE__ = requests.get(UpperCamelCase__ , headers=UpperCamelCase__ ).json()
SCREAMING_SNAKE_CASE__ = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
SCREAMING_SNAKE_CASE__ = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = requests.get(url + f'''&page={i + 2}''' , headers=UpperCamelCase__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any , UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[Any] ):
SCREAMING_SNAKE_CASE__ = None
if token is not None:
SCREAMING_SNAKE_CASE__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
SCREAMING_SNAKE_CASE__ = requests.get(UpperCamelCase__ , headers=UpperCamelCase__ , allow_redirects=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = result.headers["""Location"""]
SCREAMING_SNAKE_CASE__ = requests.get(UpperCamelCase__ , allow_redirects=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = os.path.join(UpperCamelCase__ , f'''{artifact_name}.zip''' )
with open(UpperCamelCase__ , """wb""" ) as fp:
fp.write(response.content )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Any=None ):
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = None
with zipfile.ZipFile(UpperCamelCase__ ) as z:
for filename in z.namelist():
if not os.path.isdir(UpperCamelCase__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(UpperCamelCase__ ) as f:
for line in f:
SCREAMING_SNAKE_CASE__ = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
SCREAMING_SNAKE_CASE__ = line[: line.index(""": """ )]
SCREAMING_SNAKE_CASE__ = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
                            # skip unrelated lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
SCREAMING_SNAKE_CASE__ = line[len("""FAILED """ ) :]
failed_tests.append(UpperCamelCase__ )
elif filename == "job_name.txt":
SCREAMING_SNAKE_CASE__ = line
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
f'''`errors` and `failed_tests` should have the same number of elements. Got {len(UpperCamelCase__ )} for `errors` '''
f'''and {len(UpperCamelCase__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
""" problem.""" )
SCREAMING_SNAKE_CASE__ = None
if job_name and job_links:
SCREAMING_SNAKE_CASE__ = job_links.get(UpperCamelCase__ , UpperCamelCase__ )
# A list with elements of the form (line of error, error, failed test)
SCREAMING_SNAKE_CASE__ = [x + [y] + [job_link] for x, y in zip(UpperCamelCase__ , UpperCamelCase__ )]
return result
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple , UpperCamelCase__: Any=None ):
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = [os.path.join(UpperCamelCase__ , UpperCamelCase__ ) for p in os.listdir(UpperCamelCase__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(UpperCamelCase__ , job_links=UpperCamelCase__ ) )
return errors
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: str=None ):
SCREAMING_SNAKE_CASE__ = Counter()
counter.update([x[1] for x in logs] )
SCREAMING_SNAKE_CASE__ = counter.most_common()
SCREAMING_SNAKE_CASE__ = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
SCREAMING_SNAKE_CASE__ = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
SCREAMING_SNAKE_CASE__ = dict(sorted(r.items() , key=lambda UpperCamelCase__ : item[1]["count"] , reverse=UpperCamelCase__ ) )
return r
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple ):
SCREAMING_SNAKE_CASE__ = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
SCREAMING_SNAKE_CASE__ = test.split("""/""" )[2]
else:
SCREAMING_SNAKE_CASE__ = None
return test
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple , UpperCamelCase__: Any=None ):
SCREAMING_SNAKE_CASE__ = [(x[0], x[1], get_model(x[2] )) for x in logs]
SCREAMING_SNAKE_CASE__ = [x for x in logs if x[2] is not None]
SCREAMING_SNAKE_CASE__ = {x[2] for x in logs}
SCREAMING_SNAKE_CASE__ = {}
for test in tests:
SCREAMING_SNAKE_CASE__ = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
SCREAMING_SNAKE_CASE__ = counter.most_common()
SCREAMING_SNAKE_CASE__ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
SCREAMING_SNAKE_CASE__ = sum(error_counts.values() )
if n_errors > 0:
SCREAMING_SNAKE_CASE__ = {"""count""": n_errors, """errors""": error_counts}
    SCREAMING_SNAKE_CASE__ = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=UpperCamelCase__ ) )
return r
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple ):
SCREAMING_SNAKE_CASE__ = """| no. | error | status |"""
SCREAMING_SNAKE_CASE__ = """|-:|:-|:-|"""
SCREAMING_SNAKE_CASE__ = [header, sep]
for error in reduced_by_error:
SCREAMING_SNAKE_CASE__ = reduced_by_error[error]["""count"""]
SCREAMING_SNAKE_CASE__ = f'''| {count} | {error[:100]} | |'''
lines.append(UpperCamelCase__ )
return "\n".join(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple ):
SCREAMING_SNAKE_CASE__ = """| model | no. of errors | major error | count |"""
SCREAMING_SNAKE_CASE__ = """|-:|-:|-:|-:|"""
SCREAMING_SNAKE_CASE__ = [header, sep]
for model in reduced_by_model:
SCREAMING_SNAKE_CASE__ = reduced_by_model[model]["""count"""]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = list(reduced_by_model[model]["""errors"""].items() )[0]
SCREAMING_SNAKE_CASE__ = f'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(UpperCamelCase__ )
return "\n".join(UpperCamelCase__ )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
_lowerCamelCase = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_lowerCamelCase = get_job_links(args.workflow_run_id, token=args.token)
_lowerCamelCase = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_lowerCamelCase = k.find(' / ')
_lowerCamelCase = k[index + len(' / ') :]
_lowerCamelCase = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_lowerCamelCase = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_lowerCamelCase = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_lowerCamelCase = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_lowerCamelCase = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_lowerCamelCase = reduce_by_error(errors)
_lowerCamelCase = reduce_by_model(errors)
_lowerCamelCase = make_github_table(reduced_by_error)
_lowerCamelCase = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
| 6
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__a :Dict = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Tuple=False ):
"""simple docstring"""
A_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A_ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Any=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
A_ = ""
else:
A_ = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
A_ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A_ = in_proj_weight[
: config.hidden_size, :
]
A_ = in_proj_bias[: config.hidden_size]
A_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ = in_proj_weight[
-config.hidden_size :, :
]
A_ = in_proj_bias[-config.hidden_size :]
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(__UpperCamelCase ,__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ):
"""simple docstring"""
A_ = dct.pop(__UpperCamelCase )
A_ = val
def __snake_case ( ):
"""simple docstring"""
A_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
A_ = ViTConfig()
A_ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
A_ = True
A_ = int(vit_name[-12:-10] )
A_ = int(vit_name[-9:-6] )
else:
A_ = 1000
A_ = "huggingface/label-files"
A_ = "imagenet-1k-id2label.json"
A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) )
A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
A_ = int(vit_name[-6:-4] )
A_ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
A_ = 192
A_ = 768
A_ = 12
A_ = 3
elif vit_name[9:].startswith("small" ):
A_ = 384
A_ = 1536
A_ = 12
A_ = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
A_ = 768
A_ = 2304
A_ = 8
A_ = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
A_ = 1024
A_ = 4096
A_ = 24
A_ = 16
elif vit_name[4:].startswith("huge" ):
A_ = 1280
A_ = 5120
A_ = 32
A_ = 16
# load original model from timm
A_ = timm.create_model(__UpperCamelCase ,pretrained=__UpperCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A_ = timm_model.state_dict()
if base_model:
remove_classification_head_(__UpperCamelCase )
A_ = create_rename_keys(__UpperCamelCase ,__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
read_in_q_k_v(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
A_ = ViTModel(__UpperCamelCase ).eval()
else:
A_ = ViTForImageClassification(__UpperCamelCase ).eval()
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
A_ = DeiTImageProcessor(size=config.image_size )
else:
A_ = ViTImageProcessor(size=config.image_size )
A_ = image_processor(images=prepare_img() ,return_tensors="pt" )
A_ = encoding["pixel_values"]
A_ = model(__UpperCamelCase )
if base_model:
A_ = timm_model.forward_features(__UpperCamelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__UpperCamelCase ,outputs.pooler_output ,atol=1E-3 )
else:
A_ = timm_model(__UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__UpperCamelCase ,outputs.logits ,atol=1E-3 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__a :Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 86
| 0
|
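A toy version of the aggregation step in the CI-report script above: given (error_line, error, failed_test) triples, count each error message with a Counter and list them by frequency. The sample log entries are invented.

from collections import Counter

logs = [
    ("test_a.py:10", "AssertionError", "test_a"),
    ("test_b.py:22", "AssertionError", "test_b"),
    ("test_c.py:7", "OSError", "test_c"),
]
counter = Counter(entry[1] for entry in logs)
for error, count in counter.most_common():
    print(f"{count:3d}  {error}")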
import pprint
import requests
UpperCAmelCase_ : int = """https://zenquotes.io/api"""
def _lowerCAmelCase ( ) -> int:
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def _lowerCAmelCase ( ) -> Dict:
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = random_quotes()
pprint.pprint(response)
| 711
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ : Tuple = {
"""configuration_vivit""": ["""VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VivitConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Tuple = ["""VivitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = [
"""VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VivitModel""",
"""VivitPreTrainedModel""",
"""VivitForVideoClassification""",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 440
| 0
|
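The `_LazyModule` used above defers imports until an attribute is first touched. A rough sketch of the same idea using PEP 562's module-level `__getattr__`; note it only takes effect when this code lives in a package `__init__.py` that gets imported, and `_import_structure` here maps a stand-in stdlib module, not the real Vivit symbols.

import importlib

_import_structure = {"json": ["dumps", "loads"]}

def __getattr__(name):
    # Resolve the real module only on first attribute access.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")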
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 147
|
from __future__ import annotations
class lowerCamelCase_ :
def __init__( self , lowerCamelCase_ , lowerCamelCase_ ) -> int:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = text, pattern
_UpperCamelCase , _UpperCamelCase = len(lowerCamelCase_ ), len(lowerCamelCase_ )
def lowercase ( self , lowerCamelCase_ ) -> int:
"""simple docstring"""
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def lowercase ( self , lowerCamelCase_ ) -> int:
"""simple docstring"""
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def lowercase ( self ) -> list[int]:
"""simple docstring"""
_UpperCamelCase = []
for i in range(self.textLen - self.patLen + 1 ):
_UpperCamelCase = self.mismatch_in_text(lowerCamelCase_ )
if mismatch_index == -1:
positions.append(lowerCamelCase_ )
else:
_UpperCamelCase = self.match_in_pattern(self.text[mismatch_index] )
_UpperCamelCase = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
__lowerCAmelCase = """ABAABA"""
__lowerCAmelCase = """AB"""
__lowerCAmelCase = BoyerMooreSearch(text, pattern)
__lowerCAmelCase = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
| 147
| 1
|
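Since the class above is hard to read in this form, here is a clean, runnable sketch of the bad-character heuristic it implements: on a mismatch, shift the pattern so its last occurrence of the offending text character (left of the mismatch) lines up with that position.

def bad_character_search(text, pattern):
    positions = []
    m, n = len(pattern), len(text)
    i = 0
    while i <= n - m:
        j = m - 1
        while j >= 0 and pattern[j] == text[i + j]:
            j -= 1  # scan right-to-left for the first mismatch
        if j < 0:
            positions.append(i)  # full match at alignment i
            i += 1
        else:
            last = pattern.rfind(text[i + j], 0, j)  # -1 if absent
            i += max(1, j - last)
    return positions

print(bad_character_search("ABAABA", "AB"))  # [0, 3]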
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =42
__UpperCamelCase =42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 673
|
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = abs(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
while n > 0:
res += n % 10
n //= 10
return res
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = abs(_UpperCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
return sum(int(_UpperCamelCase ) for c in str(abs(_UpperCamelCase ) ) )
def __lowerCAmelCase ( ) -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_UpperCamelCase : Callable , _UpperCamelCase : int ) -> None:
SCREAMING_SNAKE_CASE = f"""{func.__name__}({value})"""
SCREAMING_SNAKE_CASE = timeit(f"""__main__.{call}""" , setup='import __main__' )
print(f"""{call:56} = {func(_UpperCamelCase )} -- {timing:.4f} seconds""" )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_UpperCamelCase , _UpperCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 673
| 1
|
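A self-contained restatement of the three digit-sum strategies above (loop, recursion, string conversion), cross-checked on a few of the benchmark inputs. The function names are placeholders for the obfuscated ones in the snippet.

def digits_loop(n):
    n, total = abs(n), 0
    while n > 0:
        total += n % 10
        n //= 10
    return total

def digits_rec(n):
    n = abs(n)
    return n if n < 10 else n % 10 + digits_rec(n // 10)

def digits_str(n):
    return sum(int(c) for c in str(abs(n)))

for value in (0, 7, -262144, 1125899906842624):
    assert digits_loop(value) == digits_rec(value) == digits_str(value)
print("all three agree")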
from math import factorial, radians
def A ( __UpperCamelCase , __UpperCamelCase = 18 , __UpperCamelCase = 10 ) -> float:
A__ = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
A__ = radians(__UpperCamelCase )
A__ = angle_in_radians
A__ = 3
A__ = -1
for _ in range(__UpperCamelCase ):
result += (b * (angle_in_radians**a)) / factorial(__UpperCamelCase )
A__ = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 9
|
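A quick sanity check of the truncated Maclaurin series above: with the default 18 terms it should track math.sin closely once the angle is reduced to one revolution. The helper below restates the same series under the hypothetical name maclaurin_sin.

from math import factorial, radians, sin

def maclaurin_sin(deg, terms=18):
    x = radians(deg % 360.0)  # same range reduction as the snippet above
    total, sign = 0.0, 1.0
    for k in range(terms):
        total += sign * x ** (2 * k + 1) / factorial(2 * k + 1)
        sign = -sign  # terms alternate in sign
    return total

for deg in (0, 30, 90, 270):
    print(deg, round(maclaurin_sin(deg), 10), round(sin(radians(deg)), 10))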
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
def __get__( self : List[Any] ,lowercase_ : Any ,lowercase_ : List[str]=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
lowerCAmelCase__ : Optional[Any] = '''__cached_''' + self.fget.__name__
lowerCAmelCase__ : Any = getattr(lowercase_ ,lowercase_ ,lowercase_ )
if cached is None:
lowerCAmelCase__ : str = self.fget(lowercase_ )
setattr(lowercase_ ,lowercase_ ,lowercase_ )
return cached
def __SCREAMING_SNAKE_CASE ( A_ ):
lowerCAmelCase__ : int = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'invalid truth value {val!r}' )
def __SCREAMING_SNAKE_CASE ( A_ ):
if is_torch_fx_proxy(A_ ):
return True
if is_torch_available():
import torch
if isinstance(A_ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(A_ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(A_ , (jnp.ndarray, Tracer) ):
return True
return isinstance(A_ , np.ndarray )
def __SCREAMING_SNAKE_CASE ( A_ ):
return isinstance(A_ , np.ndarray )
def __SCREAMING_SNAKE_CASE ( A_ ):
return _is_numpy(A_ )
def __SCREAMING_SNAKE_CASE ( A_ ):
import torch
return isinstance(A_ , torch.Tensor )
def __SCREAMING_SNAKE_CASE ( A_ ):
return False if not is_torch_available() else _is_torch(A_ )
def __SCREAMING_SNAKE_CASE ( A_ ):
import torch
return isinstance(A_ , torch.device )
def __SCREAMING_SNAKE_CASE ( A_ ):
return False if not is_torch_available() else _is_torch_device(A_ )
def __SCREAMING_SNAKE_CASE ( A_ ):
import torch
if isinstance(A_ , A_ ):
if hasattr(A_ , A_ ):
lowerCAmelCase__ : int = getattr(A_ , A_ )
else:
return False
return isinstance(A_ , torch.dtype )
def __SCREAMING_SNAKE_CASE ( A_ ):
return False if not is_torch_available() else _is_torch_dtype(A_ )
def __SCREAMING_SNAKE_CASE ( A_ ):
import tensorflow as tf
return isinstance(A_ , tf.Tensor )
def __SCREAMING_SNAKE_CASE ( A_ ):
return False if not is_tf_available() else _is_tensorflow(A_ )
def __SCREAMING_SNAKE_CASE ( A_ ):
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(A_ , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(A_ )
return type(A_ ) == tf.Tensor
def __SCREAMING_SNAKE_CASE ( A_ ):
return False if not is_tf_available() else _is_tf_symbolic_tensor(A_ )
def __SCREAMING_SNAKE_CASE ( A_ ):
import jax.numpy as jnp # noqa: F811
return isinstance(A_ , jnp.ndarray )
def __SCREAMING_SNAKE_CASE ( A_ ):
return False if not is_flax_available() else _is_jax(A_ )
def __SCREAMING_SNAKE_CASE ( A_ ):
if isinstance(A_ , (dict, UserDict) ):
return {k: to_py_obj(A_ ) for k, v in obj.items()}
elif isinstance(A_ , (list, tuple) ):
return [to_py_obj(A_ ) for o in obj]
elif is_tf_tensor(A_ ):
return obj.numpy().tolist()
elif is_torch_tensor(A_ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(A_ ):
return np.asarray(A_ ).tolist()
elif isinstance(A_ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def __SCREAMING_SNAKE_CASE ( A_ ):
if isinstance(A_ , (dict, UserDict) ):
return {k: to_numpy(A_ ) for k, v in obj.items()}
elif isinstance(A_ , (list, tuple) ):
return np.array(A_ )
elif is_tf_tensor(A_ ):
return obj.numpy()
elif is_torch_tensor(A_ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(A_ ):
return np.asarray(A_ )
else:
return obj
# NOTE: this appears to be transformers' ModelOutput; it assumes `OrderedDict`
# (collections), `fields` (dataclasses) and `is_tensor` are available in this
# module's namespace.
class ModelOutput( OrderedDict ):
    """simple docstring"""
    def __post_init__( self ):
        class_fields = fields(self )
        # Safety and consistency checks
        if not len(class_fields ):
            raise ValueError(f"{self.__class__.__name__} has no fields." )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field." )
        first_field = getattr(self , class_fields[0].name )
        other_fields_are_none = all(getattr(self , field.name ) is None for field in class_fields[1:] )
        if other_fields_are_none and not is_tensor(first_field ):
            if isinstance(first_field , dict ):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field )
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator ):
                    if (
                        not isinstance(element , (list, tuple) )
                        or not len(element ) == 2
                        or not isinstance(element[0] , str )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)." )
                        break
                    setattr(self , element[0] , element[1] )
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self , field.name )
                if v is not None:
                    self[field.name] = v
    def __delitem__( self , *args , **kwargs ):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance." )
    def setdefault( self , *args , **kwargs ):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance." )
    def pop( self , *args , **kwargs ):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance." )
    def update( self , *args , **kwargs ):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance." )
    def __getitem__( self , k ):
        if isinstance(k , str ):
            inner_dict = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__( self , name , value ):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name , value )
        super().__setattr__(name , value )
    def __setitem__( self , key , value ):
        # Will raise a KeyException if needed
        super().__setitem__(key , value )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key , value )
    def to_tuple( self ):
        return tuple(self[k] for k in self.keys() )
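# Hedged usage sketch (illustrative): subclasses of the dict/tuple hybrid
# above are declared as dataclasses; fields left as None are skipped, and
# indexing works both by key and by position. Assumes the standard
# `dataclass` decorator.
def _demo_model_output():
    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class DemoOutput(ModelOutput ):
        numbers: Optional[list] = None
        extra: Optional[list] = None

    out = DemoOutput(numbers=[1, 2, 3] )
    assert out.numbers == out["numbers"] == out[0]
    assert "extra" not in out.keys()  # None fields are dropped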
# NOTE: assumes `Enum` (enum) is imported earlier in this module.
class ExplicitEnum( str , Enum ):
    """simple docstring"""
    @classmethod
    def _missing_( cls , value ):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}" )
class PaddingStrategy( ExplicitEnum ):
    """simple docstring"""
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"
class TensorType( ExplicitEnum ):
    """simple docstring"""
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """simple docstring"""
    def __init__( self , context_managers: List[ContextManager] ):
        self.context_managers = context_managers
        self.stack = ExitStack()
    def __enter__( self ):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager )
    def __exit__( self , *args , **kwargs ):
        self.stack.__exit__(*args , **kwargs )
def can_return_loss( model_class ):
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels( model_class ):
    model_name = model_class.__name__
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict( d , parent_key = "" , delimiter = "." ):
    def _flatten_dict(d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , dict ):
                yield from flatten_dict(v , key , delimiter=delimiter ).items()
            else:
                yield key, v
    return dict(_flatten_dict(d , parent_key , delimiter ) )
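# Hedged example (added for illustration): nested keys are joined with the
# delimiter into a single flat mapping.
def _demo_flatten_dict():
    nested = {"a": {"b": 1, "c": {"d": 2}}, "e": 3}
    assert flatten_dict(nested ) == {"a.b": 1, "a.c.d": 2, "e": 3}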
@contextmanager
def working_or_temp_dir( working_dir , use_temp_dir = False ):
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def transpose( array , axes=None ):
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(f"Type not supported for transpose: {type(array )}." )
def reshape( array , newshape ):
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(f"Type not supported for reshape: {type(array )}." )
def squeeze( array , axis=None ):
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array )}." )
def expand_dims( array , axis ):
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array )}." )
def tensor_size( array ):
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array )}." )
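# Hedged example (added for illustration): the helpers above dispatch on the
# tensor type, so one call site can serve NumPy, PyTorch, TensorFlow and JAX
# inputs alike. A NumPy-only sketch:
def _demo_framework_agnostic_ops():
    x = np.ones((2, 3) )
    assert transpose(x ).shape == (3, 2)
    assert reshape(x , (3, 2) ).shape == (3, 2)
    assert expand_dims(x , 0 ).shape == (1, 2, 3)
    assert squeeze(expand_dims(x , 0 ) ).shape == (2, 3)
    assert tensor_size(x ) == 6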
def add_model_info_to_auto_map( auto_map , repo_id ):
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map
def infer_framework( model_class ):
    for base_class in inspect.getmro(model_class ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch" ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}." )
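# Hedged example (added for illustration): infer_framework walks the MRO, so
# any torch.nn.Module subclass resolves to "pt" (assumes torch is installed).
def _demo_infer_framework():
    import torch

    class Dummy(torch.nn.Module ):
        pass

    assert infer_framework(Dummy ) == "pt"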
| 450
| 0
|
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
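# Hedged example (added for illustration): the logistic sigmoid maps any real
# input into (0, 1) and equals 0.5 exactly at 0.
def _demo_sigmoid():
    out = sigmoid(np.array([-1.0, 0.0, 1.0]))
    assert out[1] == 0.5
    assert np.all((out > 0) & (out < 1))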
| 718
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor( ProcessorMixin ):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs) -> None:
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor , tokenizer)
        self.current_processor = self.image_processor
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs)
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs)
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features) , tensor_type=return_tensors)
    def batch_decode( self , *args , **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs)
    @property
    def model_input_names( self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class( self):
        '''simple docstring'''
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
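# Hedged usage sketch (illustrative only; the checkpoint name below is an
# assumption, not taken from this file):
#
#   from transformers import ChineseCLIPProcessor
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")
#   # -> BatchEncoding with input_ids, attention_mask and pixel_values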
| 392
| 0
|
"""simple docstring"""
def longest_common_substring( text1: str , text2: str ) -> str:
    if not (isinstance(text1 , str ) and isinstance(text2 , str )):
        raise ValueError("""longest_common_substring() takes two strings for inputs""" )
    text1_length = len(text1 )
    text2_length = len(text2 )
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1 )]
    ans_index = 0
    ans_length = 0
    for i in range(1 , text1_length + 1 ):
        for j in range(1 , text2_length + 1 ):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
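# Hedged examples (added for illustration): ties are resolved in favor of the
# match found first in the scan over the first string.
def _demo_longest_common_substring():
    assert longest_common_substring("abcdxyz" , "xyzabcd" ) == "abcd"
    assert longest_common_substring("zxabcdezy" , "yzabcdezx" ) == "abcdez"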
if __name__ == "__main__":
import doctest
doctest.testmod()
| 299
|
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self ,
        parent ,
        d_model=16 ,
        batch_size=13 ,
        prediction_length=7 ,
        context_length=14 ,
        label_length=10 ,
        cardinality=19 ,
        embedding_dimension=5 ,
        num_time_features=4 ,
        is_training=True ,
        hidden_size=16 ,
        num_hidden_layers=2 ,
        num_attention_heads=4 ,
        intermediate_size=4 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        lags_sequence=[1, 2, 3, 4, 5] ,
        moving_average=25 ,
        autocorrelation_factor=5 ,
    ):
        """simple docstring"""
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self ):
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self , config ):
        """simple docstring"""
        _past_length = config.context_length + max(config.lags_sequence )
        static_categorical_features = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
        past_values = floats_tensor([self.batch_size, _past_length] )
        past_observed_mask = floats_tensor([self.batch_size, _past_length] ) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
        future_values = floats_tensor([self.batch_size, config.prediction_length] )
        inputs_dict = {
            """past_values""": past_values,
            """static_categorical_features""": static_categorical_features,
            """past_time_features""": past_time_features,
            """past_observed_mask""": past_observed_mask,
            """future_time_features""": future_time_features,
            """future_values""": future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs(self ):
        """simple docstring"""
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config )
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self , config , inputs_dict ):
        """simple docstring"""
        model = AutoformerModel(config=config ).to(torch_device ).eval()
        outputs = model(**inputs_dict )
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname )
            encoder = AutoformerEncoder.from_pretrained(tmpdirname ).to(torch_device )
        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict )
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input )[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1E-3 )
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname )
            decoder = AutoformerDecoder.from_pretrained(tmpdirname ).to(torch_device )
        last_hidden_state_2 = decoder(
            trend=trend_init , inputs_embeds=dec_input , encoder_hidden_states=encoder_last_hidden_state , )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class AutoformerModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp(self ):
        """simple docstring"""
        self.model_tester = AutoformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AutoformerConfig , has_text_modality=False )
    def test_config(self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_save_load_strict(self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model2, info = model_class.from_pretrained(tmpdirname , output_loading_info=True )
            self.assertEqual(info["""missing_keys"""] , [] )
    def test_encoder_decoder_model_standalone(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs )
    @unittest.skip(reason="""Model has no tokens embeddings""" )
    def test_resize_tokens_embeddings(self ):
        """simple docstring"""
        pass
    def test_model_main_input_name(self ):
        """simple docstring"""
        model_signature = inspect.signature(getattr(AutoformerModel , """forward""" ) )
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , observed_main_input_name )
    def test_forward_signature(self ):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                """past_values""",
                """past_time_features""",
                """past_observed_mask""",
                """static_categorical_features""",
                """static_real_features""",
                """future_values""",
                """future_time_features""",
            ]
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("""future_observed_mask""" )
            expected_arg_names.extend(
                [
                    """decoder_attention_mask""",
                    """head_mask""",
                    """decoder_head_mask""",
                    """cross_attn_head_mask""",
                    """encoder_outputs""",
                    """past_key_values""",
                    """output_hidden_states""",
                    """output_attentions""",
                    """use_cache""",
                    """return_dict""",
                ] )
            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
    def test_attention_outputs(self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester , """seq_length""" , None )
        decoder_seq_length = getattr(self.model_tester , """decoder_seq_length""" , seq_len )
        encoder_seq_length = getattr(self.model_tester , """encoder_seq_length""" , seq_len )
        d_model = getattr(self.model_tester , """d_model""" , None )
        num_attention_heads = getattr(self.model_tester , """num_attention_heads""" , None )
        dim = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            inputs_dict["""output_attentions"""] = True
            inputs_dict["""output_hidden_states"""] = False
            config.return_dict = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            out_len = len(outputs )
            correct_outlen = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(out_len , correct_outlen )
            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions , (list, tuple) )
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions , (list, tuple) )
            self.assertEqual(len(cross_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # Check attention is always last and order is fine
            inputs_dict["""output_attentions"""] = True
            inputs_dict["""output_hidden_states"""] = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + 2 , len(outputs ) )
            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
    def test_retain_grad_hidden_states_attentions(self ):
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt" ):
    file = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=filename , repo_type="""dataset""" )
    batch = torch.load(file , map_location=torch_device )
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase ):
    def test_inference_no_head(self ):
        """simple docstring"""
        model = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(torch_device )
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_inference_head(self ):
        """simple docstring"""
        model = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(torch_device )
        batch = prepare_batch("""val-batch.pt""" )
        with torch.no_grad():
            output = model(
                past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_seq_to_seq_generation(self ):
        """simple docstring"""
        model = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(torch_device )
        batch = prepare_batch("""val-batch.pt""" )
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , expected_shape )
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=torch_device )
        mean_prediction = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , expected_slice , rtol=1E-1 ) )
| 299
| 1
|
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"feature request",
"wip",
]
def main() -> None:
    '''simple docstring'''
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/accelerate' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 192
|
"""simple docstring"""
import string
import numpy
def greatest_common_divisor(a: int , b: int) -> int:
    '''simple docstring'''
    return b if a == 0 else greatest_common_divisor(b % a , a )
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x : x % 36 )
    to_int = numpy.vectorize(round )
    def __init__(self , encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters(self , letter: str) -> int:
        return self.key_string.index(letter)
    def replace_digits(self , num: int) -> str:
        return self.key_string[round(num)]
    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det , len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)
    def process_text(self , text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)
    def encrypt(self , text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ''
        for i in range(0 , len(text) - self.break_key + 1 , self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = ''.join(
                self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted
    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))
    def decrypt(self , text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ''
        for i in range(0 , len(text) - self.break_key + 1 , self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = ''.join(
                self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
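# Hedged example (added for illustration): a 2x2 key whose determinant (7) is
# coprime with 36, round-tripped through encrypt/decrypt. Note that
# process_text may pad the plaintext by repeating its final character.
def _demo_hill_cipher():
    hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
    assert hc.decrypt(hc.encrypt('testmessage')).startswith('TESTMESSAGE')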
def main() -> None:
    '''simple docstring'''
    n = int(input('Enter the order of the encryption key: ' ) )
    hill_matrix = []
    print('Enter each row of the encryption key with space separated integers' )
    for _ in range(n ):
        row = [int(x ) for x in input().split()]
        hill_matrix.append(row )
    hc = HillCipher(numpy.array(hill_matrix ) )
    print('Would you like to encrypt or decrypt some text? (1 or 2)' )
    option = input('\n1. Encrypt\n2. Decrypt\n' )
    if option == "1":
        text_e = input('What text would you like to encrypt?: ' )
        print('Your encrypted text is:' )
        print(hc.encrypt(text_e ) )
    elif option == "2":
        text_d = input('What text would you like to decrypt?: ' )
        print('Your decrypted text is:' )
        print(hc.decrypt(text_d ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 192
| 1
|