code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
def binary_insertion_sort(collection: list) -> list:
    """Sort ``collection`` in place using binary insertion sort and return it.

    A binary search finds each element's insertion point in the already
    sorted prefix (O(log i) comparisons), then the tail of the prefix is
    shifted one slot right to make room.  Overall still O(n^2) moves, but
    fewer comparisons than plain insertion sort.  Empty and single-element
    lists are returned unchanged.
    """
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # binary-search collection[:i] for the slot where val belongs
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift elements right and drop val into its slot
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
| 89
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger (the original bound both objects to the same name,
# so the second assignment clobbered the logger).
logger = logging.get_logger(__name__)

# Checkpoint name -> config-file URL map for Swin2SR models.
SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'caidas/swin2sr-classicalsr-x2-64': (
        'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
    ),
}
class lowerCAmelCase__(PretrainedConfig):
    """Configuration for a Swin2SR-style super-resolution model.

    Every constructor argument is stored as an attribute of the same name;
    ``num_layers`` is derived from ``len(depths)``.  Extra keyword arguments
    are forwarded to ``PretrainedConfig`` (the only base that makes sense
    here given the import at the top of the file — the original base name
    was undefined).
    """

    model_type = "swin2sr"
    # Maps generic PretrainedConfig attribute names onto this model's names.
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],  # only read, never mutated, so the shared default is safe
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 688
| 0
|
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    """Return gcd(a, b) by Euclid's algorithm (name restored from the
    call site inside ``HillCipher.check_determinant``)."""
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    """Hill cipher over a 36-symbol alphabet (A-Z followed by 0-9).

    The key is a square integer matrix whose determinant must be coprime
    with 36; plaintext is chunked into column vectors of the key's order
    and multiplied by the key (mod 36).  Method names are restored from
    their call sites (``self.replace_letters`` etc.) and the ``HillCipher``
    constructor call in the CLI driver below.
    """

    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key) -> None:
        """``encrypt_key``: square numpy integer array used as the cipher key."""
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        """Map a symbol of the alphabet to its numeric index (A=0 … 9=35)."""
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        """Map a (possibly float) numeric index back to its alphabet symbol."""
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        """Raise ``ValueError`` unless det(key) is coprime with 36."""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            raise ValueError(
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )

    def process_text(self, text: str) -> str:
        """Upper-case, strip symbols outside the alphabet, and pad by
        repeating the last kept character until a multiple of the key order.
        Raises IndexError on text with no usable characters (as before)."""
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        """Encrypt ``text`` block by block with the key matrix (mod 36)."""
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> "numpy.ndarray":
        """Return the modular inverse of the key matrix (mod 36)."""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        # brute-force the multiplicative inverse of det modulo 36
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        # adj(K) = det(K) * inv(K); scale by det^-1 to invert modulo 36
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        """Decrypt ``text`` block by block with the inverse key matrix."""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch
        return decrypted
def main() -> None:
    """Interactive driver: read a key matrix, then encrypt or decrypt text.

    Name restored from the ``main()`` call in the guard below; all locals
    were unbound in the corrupted original.
    """
    n = int(input('Enter the order of the encryption key: '))
    hill_matrix = []

    print('Enter each row of the encryption key with space separated integers')
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print('Would you like to encrypt or decrypt some text? (1 or 2)')
    option = input('\n1. Encrypt\n2. Decrypt\n')
    if option == "1":
        text_e = input('What text would you like to encrypt?: ')
        print('Your encrypted text is:')
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input('What text would you like to decrypt?: ')
        print('Your decrypted text is:')
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 354
|
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class _lowerCamelCase(TestCasePlus):
    """Subprocess-based tests for TRANSFORMERS_OFFLINE behaviour.

    Each test assembles a small python program from snippets (``load`` the
    imports, ``run`` the workload, ``mock`` a socket that refuses network
    access), runs it in a child interpreter, and inspects returncode/stdout.
    The base class is restored to ``TestCasePlus`` (source of ``get_env``);
    methods are given distinct names — the corrupted original defined them
    all under one name, so only the last survived.
    """

    @require_torch
    def test_offline_mode(self):
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]
        # should succeed even without TRANSFORMERS_OFFLINE, thanks to the cache
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        # the pipeline cannot infer the task while offline, so this must fail
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode',
            result.stderr.decode().replace('\n', ''),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
| 354
| 1
|
class A:  # Public class to implement a graph
    """Count connected islands of 1-cells in a grid (8-neighbour connectivity).

    Attribute and method names are restored from their own call sites
    (``self.ROW``, ``self.COL``, ``self.graph``, ``self.is_safe``,
    ``self.diffs``); the corrupted original never bound them.
    """

    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        """True if (i, j) lies inside the grid, is land, and is unvisited."""
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        """Depth-first flood fill marking every cell reachable from (i, j)."""
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        """Return the number of connected components of 1-cells."""
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
| 54
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple =(DEISMultistepScheduler,)
SCREAMING_SNAKE_CASE_ : List[str] =(("num_inference_steps", 25),)
def __lowerCAmelCase ( self : Tuple , **SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
UpperCamelCase = {
'num_train_timesteps': 10_00,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
}
config.update(**SCREAMING_SNAKE_CASE__ )
return config
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Dict=0 , **SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
UpperCamelCase = dict(self.forward_default_kwargs )
UpperCamelCase = kwargs.pop('num_inference_steps' , SCREAMING_SNAKE_CASE__ )
UpperCamelCase = self.dummy_sample
UpperCamelCase = 0.1 * sample
UpperCamelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCamelCase = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
UpperCamelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
UpperCamelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCamelCase , UpperCamelCase = sample, sample
for t in range(SCREAMING_SNAKE_CASE__ , time_step + scheduler.config.solver_order + 1 ):
UpperCamelCase = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
UpperCamelCase = new_scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self : Optional[int] ):
"""simple docstring"""
pass
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , **SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
UpperCamelCase = dict(self.forward_default_kwargs )
UpperCamelCase = kwargs.pop('num_inference_steps' , SCREAMING_SNAKE_CASE__ )
UpperCamelCase = self.dummy_sample
UpperCamelCase = 0.1 * sample
UpperCamelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCamelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residual (must be after setting timesteps)
UpperCamelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCamelCase = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
UpperCamelCase = new_scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any]=None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
if scheduler is None:
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase = scheduler_class(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase = scheduler_class(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase = 10
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
return sample
def __lowerCAmelCase ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = dict(self.forward_default_kwargs )
UpperCamelCase = kwargs.pop('num_inference_steps' , SCREAMING_SNAKE_CASE__ )
for scheduler_class in self.scheduler_classes:
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase = self.dummy_sample
UpperCamelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE__ , 'set_timesteps' ):
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE__ , 'set_timesteps' ):
UpperCamelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCamelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
UpperCamelCase = dummy_past_residuals[: scheduler.config.solver_order]
UpperCamelCase = scheduler.timesteps[5]
UpperCamelCase = scheduler.timesteps[6]
UpperCamelCase = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
UpperCamelCase = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __lowerCAmelCase ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = DEISMultistepScheduler(**self.get_scheduler_config() )
UpperCamelCase = self.full_loop(scheduler=SCREAMING_SNAKE_CASE__ )
UpperCamelCase = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.23916 ) < 1e-3
UpperCamelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
UpperCamelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
UpperCamelCase = UniPCMultistepScheduler.from_config(scheduler.config )
UpperCamelCase = DEISMultistepScheduler.from_config(scheduler.config )
UpperCamelCase = self.full_loop(scheduler=SCREAMING_SNAKE_CASE__ )
UpperCamelCase = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.23916 ) < 1e-3
def __lowerCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str ):
"""simple docstring"""
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE__ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , sample_max_value=SCREAMING_SNAKE_CASE__ , algorithm_type='deis' , solver_order=SCREAMING_SNAKE_CASE__ , solver_type=SCREAMING_SNAKE_CASE__ , )
def __lowerCAmelCase ( self : List[Any] ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[int] ):
"""simple docstring"""
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=SCREAMING_SNAKE_CASE__ , solver_type=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , algorithm_type=SCREAMING_SNAKE_CASE__ , )
UpperCamelCase = self.full_loop(
solver_order=SCREAMING_SNAKE_CASE__ , solver_type=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , algorithm_type=SCREAMING_SNAKE_CASE__ , )
assert not torch.isnan(SCREAMING_SNAKE_CASE__ ).any(), "Samples have nan numbers"
def __lowerCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE__ )
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Any ):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE__ , time_step=0 )
def __lowerCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.full_loop()
UpperCamelCase = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.23916 ) < 1e-3
def __lowerCAmelCase ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.full_loop(prediction_type='v_prediction' )
UpperCamelCase = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.091 ) < 1e-3
def __lowerCAmelCase ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE__ , dynamic_thresholding_ratio=0 )
UpperCamelCase = scheduler_class(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase = 10
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
assert sample.dtype == torch.floataa
| 282
| 0
|
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ = 1_6
a_ = 3_2
def _a( UpperCamelCase__ : Accelerator, UpperCamelCase__ : int = 1_6 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] =AutoTokenizer.from_pretrained('''bert-base-cased''' )
SCREAMING_SNAKE_CASE__ : int =load_dataset('''glue''', '''mrpc''' )
def tokenize_function(UpperCamelCase__ : Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=UpperCamelCase__, max_length=UpperCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE__ : Dict =datasets.map(
UpperCamelCase__, batched=UpperCamelCase__, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE__ : List[Any] =tokenized_datasets.rename_column('''label''', '''labels''' )
def collate_fn(UpperCamelCase__ : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE__ : str =1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE__ : str =1_6
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE__ : Optional[Any] =8
else:
SCREAMING_SNAKE_CASE__ : Dict =None
return tokenizer.pad(
UpperCamelCase__, padding='''longest''', max_length=UpperCamelCase__, pad_to_multiple_of=UpperCamelCase__, return_tensors='''pt''', )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__ : Optional[int] =DataLoader(
tokenized_datasets['''train'''], shuffle=UpperCamelCase__, collate_fn=UpperCamelCase__, batch_size=UpperCamelCase__, drop_last=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] =DataLoader(
tokenized_datasets['''validation'''], shuffle=UpperCamelCase__, collate_fn=UpperCamelCase__, batch_size=UpperCamelCase__, drop_last=(accelerator.mixed_precision == '''fp8'''), )
return train_dataloader, eval_dataloader
def training_function(config: dict, args) -> None:
    """Train and evaluate bert-base-cased on GLUE/MRPC with Accelerate.

    ``config`` supplies lr / num_epochs / seed / batch_size; ``args``
    carries the CLI flags (``cpu``, ``mixed_precision``).  Name restored
    from the call in ``main``.
    """
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])

    metric = evaluate.load('''glue''', '''mrpc''')

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def _a():
    """CLI entry point: parse arguments and launch the training run.

    The original stored the parser in a throwaway name while calling methods
    on the undefined ``parser``, used the undefined ``UpperCamelCase__`` as
    the ``--mixed_precision`` type/default, and passed the same object twice
    to ``training_function``.
    """
    parser = argparse.ArgumentParser(description='''Simple example of training script.''')
    parser.add_argument(
        '''--mixed_precision''',
        type=str,
        default=None,
        choices=['''no''', '''fp16''', '''bf16''', '''fp8'''],
        help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''',
    )
    parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''')
    args = parser.parse_args()
    # Hyper-parameters consumed by the training function defined above.
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    # NOTE(review): `training_function` is assumed to be the training entry
    # point defined earlier in this file -- confirm its post-refactor name.
    training_function(config, args)
if __name__ == "__main__":
    # The CLI entry point above is `_a`; the original called the undefined
    # name `main`, raising NameError whenever the script was executed.
    _a()
| 665
|
'''simple docstring'''
from math import isqrt
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =[True] * max_number
for i in range(2, isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2, UpperCamelCase__, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Any =False
return [i for i in range(2, UpperCamelCase__ ) if is_prime[i]]
def _a( UpperCamelCase__ : int = 1_0**8 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple =calculate_prime_numbers(max_number // 2 )
SCREAMING_SNAKE_CASE__ : int =0
SCREAMING_SNAKE_CASE__ : int =0
SCREAMING_SNAKE_CASE__ : Optional[int] =len(UpperCamelCase__ ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
    # NOTE(review): `solution` is never defined in this module as written --
    # the solver above is bound to `_a`; confirm the intended public name.
    print(F'''{solution() = }''')
| 665
| 1
|
"""simple docstring"""
from itertools import count
def a_(lowercase__: int = 50) -> int:
    """Project Euler 115: least row length n at which the fill-count function
    F(m, n) first exceeds one million, for minimum block length m = ``lowercase__``.

    The original body referenced the undefined names ``min_block_length`` and
    ``__lowerCamelCase`` instead of the parameter, raising NameError.
    """
    # fill_count_functions[n] == F(m, n); rows shorter than m admit only the
    # empty filling, hence the initial run of ones.
    fill_count_functions = [1] * lowercase__
    for n in count(lowercase__):
        fill_count_functions.append(1)  # the all-red (empty) filling
        for block_length in range(lowercase__, n + 1):
            for block_start in range(n - block_length):
                # Place a block at `block_start`, keep one separating red
                # square after it, and fill the remainder recursively.
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            # The block may also sit flush against the right edge.
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            return n
    raise AssertionError("unreachable: itertools.count never terminates")


# Public alias expected by the `__main__` guard below.
solution = a_
if __name__ == "__main__":
    # NOTE(review): `solution` is never defined here -- the counting routine
    # above is bound to `a_`; confirm the intended public name.
    print(f"""{solution() = }""")
| 281
|
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
_SCREAMING_SNAKE_CASE : Any = """sshleifer/bart-tiny-random"""
_SCREAMING_SNAKE_CASE : List[str] = """patrickvonplaten/t5-tiny-random"""
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """Tests for `create_student_by_copying_alternating_layers`.

    NOTE(review): this block cannot run as written -- every method is named
    ``__lowercase`` (so only the last survives on the class), the helper is
    called with the undefined name ``__lowerCamelCase`` (presumably the
    TINY_BART / TINY_T5 constants above), results are bound to the throwaway
    ``UpperCamelCase__`` while assertions read ``student``, and the annotated
    starred assignments are themselves SyntaxErrors.  Left byte-identical
    pending a decision on the intended identifiers.
    """
    @cached_property
    def __lowercase( self : str ) -> Dict:
        return AutoConfig.from_pretrained(__lowerCamelCase )
    def __lowercase( self : Optional[int] ) -> int:
        UpperCamelCase__ ,*UpperCamelCase__ : Dict = create_student_by_copying_alternating_layers(__lowerCamelCase, tempfile.mkdtemp(), e=1, d=1 )
        self.assertEqual(student.config.num_hidden_layers, 1 )
    def __lowercase( self : List[Any] ) -> Optional[Any]:
        UpperCamelCase__ ,*UpperCamelCase__ : Optional[Any] = create_student_by_copying_alternating_layers(__lowerCamelCase, tempfile.mkdtemp(), e=1, d=__lowerCamelCase )
    def __lowercase( self : str ) -> List[Any]:
        UpperCamelCase__ ,*UpperCamelCase__ : str = create_student_by_copying_alternating_layers(__lowerCamelCase, tempfile.mkdtemp(), e=1, d=__lowerCamelCase )
        self.assertEqual(student.config.encoder_layers, 1 )
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers )
    def __lowercase( self : Union[str, Any] ) -> int:
        UpperCamelCase__ ,*UpperCamelCase__ : int = create_student_by_copying_alternating_layers(__lowerCamelCase, tempfile.mkdtemp(), e=1, d=1 )
        self.assertEqual(student.config.encoder_layers, 1 )
        self.assertEqual(student.config.decoder_layers, 1 )
    def __lowercase( self : Union[str, Any] ) -> Tuple:
        with self.assertRaises(__lowerCamelCase ):
            create_student_by_copying_alternating_layers(__lowerCamelCase, tempfile.mkdtemp(), e=__lowerCamelCase, d=__lowerCamelCase )
| 344
| 0
|
class __lowercase:
    """Prefix-sum table over a numeric sequence.

    Supports O(1) inclusive range sums and an O(n) check for the existence of
    a contiguous subarray with a given sum.  The original version declared
    duplicate parameter names (a SyntaxError), referenced the undefined names
    ``len_array``/``array``, and never assigned ``self.prefix_sum``, so it
    could not even be imported; the method names below follow the behaviour
    the bodies clearly intended.
    """

    def __init__(self, A_) -> None:
        """Precompute prefix sums of the sequence ``A_`` in O(n)."""
        len_array = len(A_)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = A_[0]
        # prefix_sum[i] holds the sum of A_[0..i] inclusive.
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + A_[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of the inclusive slice ``[start, end]`` in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum_equal_to(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to ``target_sum``."""
        # Subarray (i, j] sums to target iff prefix[j] - target == prefix[i];
        # {0} seeds the case where the subarray starts at index 0.
        seen = {0}
        for running in self.prefix_sum:
            if running - target_sum in seen:
                return True
            seen.add(running)
        return False
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 583
|
def _lowercase ( lowercase__ , lowercase__ ):
__lowerCAmelCase : Union[str, Any] = len(lowercase__ )
__lowerCAmelCase : Any = len(lowercase__ )
__lowerCAmelCase : str = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
__lowerCAmelCase : Optional[Any] = True
for i in range(lowercase__ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
__lowerCAmelCase : Union[str, Any] = True
if a[i].islower():
__lowerCAmelCase : Optional[Any] = True
return dp[n][m]
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 583
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
# Map of submodule name -> public names it defines.  `_LazyModule` consumes
# this so optional heavy backends (vision / torch / flax) are imported only on
# first attribute access.  The original bound every piece to the same
# throwaway name `A_`, so the structure was never assembled, and the final
# `_LazyModule` call referenced an undefined `_import_structure` and bound the
# proxy to `A_` instead of installing it in `sys.modules`.
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # below performs them on demand.
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy in place of this module.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42
|
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def _lowerCamelCase (__lowerCamelCase : str ) -> None:
    """Print character- and bigram-level entropies of the input (base 2, rounded),
    followed by their difference.

    NOTE(review): as written this function cannot run -- every assignment
    targets the throwaway name ``a__`` so the names the body reads
    (``single_char_strings``, ``my_alphas``, ``my_str``, ``prob``,
    ``all_sum``, ``sequence``, ...) are never bound, ``analyze_text`` does not
    exist under that name here, and ``math.loga`` is undefined (presumably
    ``math.log2``).  Left byte-identical pending a decision on the intended
    identifiers.
    """
    a__ , a__ = analyze_text(__lowerCamelCase )
    a__ = list(" " + ascii_lowercase )
    # what is our total sum of probabilities.
    a__ = sum(single_char_strings.values() )
    # one length string
    a__ = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            a__ = single_char_strings[ch]
            a__ = my_str / all_sum
            my_fir_sum += prob * math.loga(__lowerCamelCase ) # entropy formula.
    # print entropy
    print(f'''{round(-1 * my_fir_sum ):.1f}''' )
    # two len string
    a__ = sum(two_char_strings.values() )
    a__ = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for cha in my_alphas:
            a__ = cha + cha
            if sequence in two_char_strings:
                a__ = two_char_strings[sequence]
                a__ = int(__lowerCamelCase ) / all_sum
                my_sec_sum += prob * math.loga(__lowerCamelCase )
    # print second entropy
    print(f'''{round(-1 * my_sec_sum ):.1f}''' )
    # print the difference between them
    print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def _lowerCamelCase (__lowerCamelCase : str ) -> tuple[dict, dict]:
a__ = Counter() # type: ignore
a__ = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(__lowerCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def _lowerCamelCase () -> Any:
    """Run the module's doctests (acts as the script entry point).

    NOTE(review): this is the third definition bound to ``_lowerCamelCase`` in
    this module, shadowing the entropy helpers above -- confirm the intended
    names before relying on any of them.
    """
    import doctest
    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)
if __name__ == "__main__":
    # The doctest runner above is bound to `_lowerCamelCase`; the original
    # called the undefined name `main`, raising NameError when run as a script.
    _lowerCamelCase()
| 489
| 0
|
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def SCREAMING_SNAKE_CASE__() -> tuple:
    """Build a tiny model/optimizer/scheduler/dataloader fixture for the tests.

    Returns ``(model, optimizer, scheduler, train_dl, valid_dl)``.  The
    original passed the undefined name ``lowerCamelCase__`` to ``OneCycleLR``
    instead of the optimizer it had just created.
    """
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl
def SCREAMING_SNAKE_CASE__(lowerCamelCase__) -> float:
    """L1-norm signature of a linear layer's parameters (weight + bias).

    Used by the tests to detect whether checkpoint loading actually changed
    the weights.  The original body referenced the undefined name ``model``
    while the parameter was named ``lowerCamelCase__``.
    """
    return (lowerCamelCase__.weight.abs().sum() + lowerCamelCase__.bias.abs().sum()).item()
def SCREAMING_SNAKE_CASE__(lowerCamelCase__) -> None:
    """Overwrite a linear layer's parameters with freshly initialised ones.

    A new ``Linear`` built from the transposed weight shape yields a
    ``state_dict`` with compatible shapes.  The original body referenced the
    undefined name ``model`` while the parameter was named ``lowerCamelCase__``.
    """
    rand_weights = torch.nn.Linear(*tuple(lowerCamelCase__.weight.T.shape)).state_dict()
    lowerCamelCase__.load_state_dict(rand_weights)
class A_ ( SCREAMING_SNAKE_CASE ):
@require_cuda
def lowerCAmelCase ( self : Dict):
__lowerCamelCase : Optional[Any] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(SCREAMING_SNAKE_CASE__):
__lowerCamelCase : int = Accelerator(cpu=SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Dict):
__lowerCamelCase : Optional[int] = Accelerator()
__lowerCamelCase : Optional[Any] = GradientState()
assert state.num_steps == 1
__lowerCamelCase : Dict = 4
assert state.num_steps == 4
assert state.sync_gradients is True
__lowerCamelCase : Union[str, Any] = False
assert state.sync_gradients is False
GradientState._reset_state()
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Optional[int] = Accelerator()
__lowerCamelCase : Tuple = create_components()
(
__lowerCamelCase
) : str = accelerator.prepare(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
self.assertTrue(prepared_model in accelerator._models)
self.assertTrue(prepared_optimizer in accelerator._optimizers)
self.assertTrue(prepared_scheduler in accelerator._schedulers)
self.assertTrue(prepared_train_dl in accelerator._dataloaders)
self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : Optional[Any] = Accelerator()
__lowerCamelCase : int = create_components()
accelerator.prepare(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
accelerator.free_memory()
self.assertTrue(len(accelerator._models) == 0)
self.assertTrue(len(accelerator._optimizers) == 0)
self.assertTrue(len(accelerator._schedulers) == 0)
self.assertTrue(len(accelerator._dataloaders) == 0)
def lowerCAmelCase ( self : Any):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*SCREAMING_SNAKE_CASE__ : Tuple ,**SCREAMING_SNAKE_CASE__ : Optional[Any]):
pass
with patch('torch.cuda.set_device' ,SCREAMING_SNAKE_CASE__), patch_environment(ACCELERATE_TORCH_DEVICE='cuda:64'):
__lowerCamelCase : str = Accelerator()
self.assertEqual(str(accelerator.state.device) ,'cuda:64')
def lowerCAmelCase ( self : int):
__lowerCamelCase : Optional[int] = Accelerator()
__lowerCamelCase : Any = create_components()
accelerator.prepare(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = get_signature(SCREAMING_SNAKE_CASE__)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(SCREAMING_SNAKE_CASE__)
# make sure random weights don't match
load_random_weights(SCREAMING_SNAKE_CASE__)
self.assertTrue(abs(model_signature - get_signature(SCREAMING_SNAKE_CASE__)) > 1E-3)
# make sure loaded weights match
accelerator.load_state(SCREAMING_SNAKE_CASE__)
self.assertTrue(abs(model_signature - get_signature(SCREAMING_SNAKE_CASE__)) < 1E-3)
def lowerCAmelCase ( self : Tuple):
__lowerCamelCase : Tuple = Accelerator()
__lowerCamelCase : List[str] = create_components()
accelerator.prepare(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = get_signature(SCREAMING_SNAKE_CASE__)
# saving hook
def save_config(SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[int]):
__lowerCamelCase : Union[str, Any] = {'class_name': models[0].__class__.__name__}
with open(os.path.join(SCREAMING_SNAKE_CASE__ ,'data.json') ,'w') as f:
json.dump(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
# loading hook
def load_config(SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Dict):
with open(os.path.join(SCREAMING_SNAKE_CASE__ ,'data.json') ,'r') as f:
__lowerCamelCase : List[Any] = json.load(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = config['class_name']
__lowerCamelCase : Any = accelerator.register_save_state_pre_hook(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = accelerator.register_load_state_pre_hook(SCREAMING_SNAKE_CASE__)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(SCREAMING_SNAKE_CASE__)
# make sure random weights don't match with hooks
load_random_weights(SCREAMING_SNAKE_CASE__)
self.assertTrue(abs(model_signature - get_signature(SCREAMING_SNAKE_CASE__)) > 1E-3)
# random class name to verify correct one is loaded
__lowerCamelCase : Optional[Any] = 'random'
# make sure loaded weights match with hooks
accelerator.load_state(SCREAMING_SNAKE_CASE__)
self.assertTrue(abs(model_signature - get_signature(SCREAMING_SNAKE_CASE__)) < 1E-3)
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__)
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(SCREAMING_SNAKE_CASE__)
# make sure random weights don't match with hooks removed
load_random_weights(SCREAMING_SNAKE_CASE__)
self.assertTrue(abs(model_signature - get_signature(SCREAMING_SNAKE_CASE__)) > 1E-3)
# random class name to verify correct one is loaded
__lowerCamelCase : Any = 'random'
# make sure loaded weights match with hooks removed
accelerator.load_state(SCREAMING_SNAKE_CASE__)
self.assertTrue(abs(model_signature - get_signature(SCREAMING_SNAKE_CASE__)) < 1E-3)
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__)
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : Union[str, Any] = Accelerator()
__lowerCamelCase : Any = create_components()
__lowerCamelCase : Optional[int] = None
# This should work
__lowerCamelCase : Dict = accelerator.prepare(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
self.assertTrue(dummy_obj is None)
def lowerCAmelCase ( self : Dict):
__lowerCamelCase : Union[str, Any] = Accelerator()
__lowerCamelCase : str = create_components()
__lowerCamelCase : Any = [1, 2, 3]
# This should work
__lowerCamelCase : List[str] = accelerator.prepare(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
self.assertEqual(
getattr(SCREAMING_SNAKE_CASE__ ,'_is_accelerate_prepared' ,SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__ ,'Dummy object should have `_is_accelerate_prepared` set to `True`' ,)
self.assertEqual(
getattr(SCREAMING_SNAKE_CASE__ ,'_is_accelerate_prepared' ,SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__ ,'Model is missing `_is_accelerator_prepared` or is set to `False`' ,)
self.assertEqual(
getattr(SCREAMING_SNAKE_CASE__ ,'_is_accelerate_prepared' ,SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__ ,'Optimizer is missing `_is_accelerator_prepared` or is set to `False`' ,)
self.assertEqual(
getattr(SCREAMING_SNAKE_CASE__ ,'_is_accelerate_prepared' ,SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__ ,'Scheduler is missing `_is_accelerator_prepared` or is set to `False`' ,)
self.assertEqual(
getattr(SCREAMING_SNAKE_CASE__ ,'_is_accelerate_prepared' ,SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__ ,'Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`' ,)
self.assertEqual(
getattr(SCREAMING_SNAKE_CASE__ ,'_is_accelerate_prepared' ,SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__ ,'Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`' ,)
@slow
@require_bnb
def lowerCAmelCase ( self : Optional[Any]):
from transformers import AutoModelForCausalLM
__lowerCamelCase : List[Any] = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' ,load_in_abit=SCREAMING_SNAKE_CASE__ ,device_map={'': 0} ,)
__lowerCamelCase : str = Accelerator()
# This should work
__lowerCamelCase : Any = accelerator.prepare(SCREAMING_SNAKE_CASE__)
@slow
@require_bnb
def lowerCAmelCase ( self : Tuple):
from transformers import AutoModelForCausalLM
__lowerCamelCase : Any = Accelerator()
with init_empty_weights():
__lowerCamelCase : int = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' ,)
model.tie_weights()
__lowerCamelCase : Union[str, Any] = infer_auto_device_map(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = 'cpu'
__lowerCamelCase : List[str] = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' ,device_map=SCREAMING_SNAKE_CASE__ ,load_in_abit=SCREAMING_SNAKE_CASE__ ,llm_inta_enable_fpaa_cpu_offload=SCREAMING_SNAKE_CASE__)
# This should not work and get value error
with self.assertRaises(SCREAMING_SNAKE_CASE__):
__lowerCamelCase : int = accelerator.prepare(SCREAMING_SNAKE_CASE__)
@slow
@require_bnb
@require_multi_gpu
def lowerCAmelCase ( self : Tuple):
from transformers import AutoModelForCausalLM
__lowerCamelCase : List[Any] = {'distributed_type': DistributedType.MULTI_GPU}
with init_empty_weights():
__lowerCamelCase : List[str] = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' ,)
model.tie_weights()
__lowerCamelCase : Dict = infer_auto_device_map(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = 1
__lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' ,load_in_abit=SCREAMING_SNAKE_CASE__ ,device_map=SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : Any = Accelerator()
# This should not work and get value error
with self.assertRaises(SCREAMING_SNAKE_CASE__):
__lowerCamelCase : List[str] = accelerator.prepare(SCREAMING_SNAKE_CASE__)
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def lowerCAmelCase ( self : Union[str, Any]):
from transformers import AutoModelForCausalLM
with init_empty_weights():
__lowerCamelCase : Any = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' ,)
__lowerCamelCase : Dict = infer_auto_device_map(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = 1
__lowerCamelCase : int = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' ,load_in_abit=SCREAMING_SNAKE_CASE__ ,device_map=SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : Any = Accelerator()
# This should work
__lowerCamelCase : List[str] = accelerator.prepare(SCREAMING_SNAKE_CASE__)
@require_cuda
def lowerCAmelCase ( self : Tuple):
__lowerCamelCase : Union[str, Any] = torch.nn.Linear(1_0 ,1_0)
__lowerCamelCase : Union[str, Any] = torch.optim.SGD(model.parameters() ,lr=0.01)
__lowerCamelCase : List[str] = Accelerator(cpu=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = accelerator.prepare(SCREAMING_SNAKE_CASE__)
| 700
|
# Public feature types re-exported by this package (mirrors the imports
# below).  NOTE(review): conventionally this list is named `__all__`; bound to
# `a` it has no effect on `from package import *` -- confirm the intended name.
a =[
    """Audio""",
    """Array2D""",
    """Array3D""",
    """Array4D""",
    """Array5D""",
    """ClassLabel""",
    """Features""",
    """Sequence""",
    """Value""",
    """Image""",
    """Translation""",
    """TranslationVariableLanguages""",
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 337
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=_a ):
    """Placeholder object that fails fast when the `keras_nlp` backend is missing.

    Instantiating it calls `requires_backends`, which raises an ImportError
    explaining how to install the missing dependency.
    """
    # Backend list consumed by `requires_backends` via the metaclass machinery.
    lowerCamelCase__ = ['''keras_nlp''']
    def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
        requires_backends(self , ["""keras_nlp"""] )
| 38
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self :List[str]):
"""simple docstring"""
_lowercase =tempfile.mkdtemp()
_lowercase =BlipImageProcessor()
_lowercase =GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model')
_lowercase =BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert')
_lowercase =InstructBlipProcessor(snake_case, snake_case, snake_case)
processor.save_pretrained(self.tmpdirname)
def UpperCamelCase__ ( self :List[str], **snake_case :str):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname, **snake_case).tokenizer
def UpperCamelCase__ ( self :Optional[Any], **snake_case :List[Any]):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname, **snake_case).image_processor
def UpperCamelCase__ ( self :Tuple, **snake_case :Any):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname, **snake_case).qformer_tokenizer
def UpperCamelCase__ ( self :Optional[int]):
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def UpperCamelCase__ ( self :List[str]):
"""simple docstring"""
_lowercase =[np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
_lowercase =[Image.fromarray(np.moveaxis(snake_case, 0, -1)) for x in image_inputs]
return image_inputs
def UpperCamelCase__ ( self :List[Any]):
"""simple docstring"""
_lowercase =InstructBlipProcessor(
tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(), qformer_tokenizer=self.get_qformer_tokenizer(), )
processor.save_pretrained(self.tmpdirname)
_lowercase =self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
_lowercase =self.get_image_processor(do_normalize=snake_case, padding_value=1.0)
_lowercase =InstructBlipProcessor.from_pretrained(
self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=snake_case, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, snake_case)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, snake_case)
self.assertIsInstance(processor.qformer_tokenizer, snake_case)
def UpperCamelCase__ ( self :Tuple):
"""simple docstring"""
_lowercase =self.get_image_processor()
_lowercase =self.get_tokenizer()
_lowercase =self.get_qformer_tokenizer()
_lowercase =InstructBlipProcessor(
tokenizer=snake_case, image_processor=snake_case, qformer_tokenizer=snake_case)
_lowercase =self.prepare_image_inputs()
_lowercase =image_processor(snake_case, return_tensors='np')
_lowercase =processor(images=snake_case, return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
def UpperCamelCase__ ( self :List[Any]):
"""simple docstring"""
_lowercase =self.get_image_processor()
_lowercase =self.get_tokenizer()
_lowercase =self.get_qformer_tokenizer()
_lowercase =InstructBlipProcessor(
tokenizer=snake_case, image_processor=snake_case, qformer_tokenizer=snake_case)
_lowercase ='lower newer'
_lowercase =processor(text=snake_case)
_lowercase =tokenizer(snake_case, return_token_type_ids=snake_case)
_lowercase =qformer_tokenizer(snake_case, return_token_type_ids=snake_case)
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key], encoded_processor[key])
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key], encoded_processor['qformer_' + key])
def UpperCamelCase__ ( self :Any):
"""simple docstring"""
_lowercase =self.get_image_processor()
_lowercase =self.get_tokenizer()
_lowercase =self.get_qformer_tokenizer()
_lowercase =InstructBlipProcessor(
tokenizer=snake_case, image_processor=snake_case, qformer_tokenizer=snake_case)
_lowercase ='lower newer'
_lowercase =self.prepare_image_inputs()
_lowercase =processor(text=snake_case, images=snake_case)
self.assertListEqual(
list(inputs.keys()), ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'], )
# test if it raises when no input is passed
with pytest.raises(snake_case):
processor()
def UpperCamelCase__ ( self :Dict):
"""simple docstring"""
_lowercase =self.get_image_processor()
_lowercase =self.get_tokenizer()
_lowercase =self.get_qformer_tokenizer()
_lowercase =InstructBlipProcessor(
tokenizer=snake_case, image_processor=snake_case, qformer_tokenizer=snake_case)
_lowercase =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowercase =processor.batch_decode(snake_case)
_lowercase =tokenizer.batch_decode(snake_case)
self.assertListEqual(snake_case, snake_case)
def UpperCamelCase__ ( self :int):
"""simple docstring"""
_lowercase =self.get_image_processor()
_lowercase =self.get_tokenizer()
_lowercase =self.get_qformer_tokenizer()
_lowercase =InstructBlipProcessor(
tokenizer=snake_case, image_processor=snake_case, qformer_tokenizer=snake_case)
_lowercase ='lower newer'
_lowercase =self.prepare_image_inputs()
_lowercase =processor(text=snake_case, images=snake_case)
self.assertListEqual(
list(inputs.keys()), ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'], )
| 181
| 0
|
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def snake_case__(__lowercase) -> dict:
    """Load the T5X checkpoint at path ``__lowercase`` and flatten its params.

    The original bound both intermediates to the throwaway name ``A__``,
    flattened the *path* instead of the loaded checkpoint, and returned the
    undefined name ``flax_params``.
    """
    loaded = checkpoints.load_tax_checkpoint(__lowercase)
    return flatten_dict(loaded)


# Name used by the conversion entry point below.
get_flax_param = snake_case__
def snake_case__(__lowercase) -> dict:
    """Rename flattened T5X Pix2Struct params to HF names and convert to torch.

    ``__lowercase`` maps tuple keys (T5X parameter paths) to numpy arrays;
    only entries under the "target" scope are kept.  Weight matrices are
    transposed (T5X stores them kernel-style) except embedding tables.

    The original bound every intermediate to the throwaway name ``A__``, so
    the mapping tables and ``new_key`` were undefined at their use sites.
    """
    converted_dict = {}
    conversion_mapping = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }
    decoder_conversion_mapping = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }
    for key in __lowercase.keys():
        if "target" in key:
            # Drop the leading "target" scope, join the rest into a dotted path.
            new_key = ".".join(key[1:])
            for old, new in conversion_mapping.items():
                new_key = new_key.replace(old, new)
            if "decoder" in new_key:
                for old, new in decoder_conversion_mapping.items():
                    new_key = new_key.replace(old, new)
            if "layers" in new_key and "decoder" not in new_key:
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
            converted_dict[new_key] = __lowercase[key]
    converted_torch_dict = {}
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            # T5X stores linear weights transposed relative to torch layout.
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])
    return converted_torch_dict


# Name used by the conversion entry point below.
rename_and_convert_flax_params = snake_case__
def snake_case__ ( t5x_checkpoint_path , pytorch_dump_folder_path , use_large=False , is_vqa=False ) -> List[Any]:
    """Convert an original T5x Pix2Struct checkpoint into a HF model + processor dump.

    Args:
        t5x_checkpoint_path: path of the original flax/T5x checkpoint.
        pytorch_dump_folder_path: output directory (created if missing).
        use_large: build the "large" config variant (hidden 1536, 18 layers).
        is_vqa: mark the converted config as a VQA model.

    Fix vs. original: every parameter was declared with the same name
    (``__lowercase``), which is a SyntaxError in Python; names are recovered
    from how each value is used in the body.
    """
    flax_params = get_flax_param(t5x_checkpoint_path )
    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_attention_heads=2_4 , num_hidden_layers=1_8 )
        decoder_config = PixaStructTextConfig(hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_heads=2_4 , num_layers=1_8 )
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=is_vqa )
    model = PixaStructForConditionalGeneration(config )
    # Rename the flax weights to HF layout and load them into the torch model.
    torch_params = rename_and_convert_flax_params(flax_params )
    model.load_state_dict(torch_params )
    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer" )
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor , tokenizer=tokenizer )
    if use_large:
        # NOTE(review): the original bound these values to throwaway locals; the
        # attribute targets below are reconstructed — confirm against upstream.
        processor.image_processor.max_patches = 4_0_9_6
        processor.image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
    print("Model saved in {}".format(pytorch_dump_folder_path ) )
if __name__ == "__main__":
    # Fixes vs. original:
    #  * the conversion entry point is the `snake_case__` function defined above
    #    (the original called an undefined name), and
    #  * the parsed attribute is `t5x_checkpoint_path`, matching the flag
    #    (the original read the non-existent `args.tax_checkpoint_path`).
    parser = argparse.ArgumentParser()
    parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--use_large', action='store_true', help='Use large model.')
    parser.add_argument('--is_vqa', action='store_true', help='Convert as a VQA model.')
    args = parser.parse_args()
    snake_case__(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 182
|
from collections.abc import Callable
import numpy as np
def snake_case__ ( ode_func , y0 , x0 , step_size , x_end ) -> np.array:
    """Integrate y' = ode_func(x, y) with Heun's method (explicit trapezoidal rule).

    Args:
        ode_func: right-hand side f(x, y) of the ODE.
        y0: initial value y(x0).
        x0: start of the integration interval.
        step_size: fixed step h (> 0).
        x_end: end of the integration interval.

    Returns:
        np.ndarray with the approximated y values at x0, x0+h, ..., x_end.

    Fix vs. original: all five parameters were declared with the same name
    (a SyntaxError); names and order are recovered from the body's usage.

    >>> import numpy as np
    >>> y = snake_case__(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    >>> bool(abs(y[-1] - np.exp(1.0)) < 1e-3)
    True
    """
    n = int(np.ceil((x_end - x0) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = y0
    x = x0
    for k in range(n ):
        # Predictor: one explicit Euler step.
        y_pred = y[k] + step_size * ode_func(x , y[k] )
        # Corrector: average the slopes at both ends of the interval.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_pred ))
        )
        x += step_size
    return y
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 182
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _A ( unittest.TestCase ):
    """Fast CPU tests for ``StableDiffusionUpscalePipeline`` built from tiny dummy components.

    NOTE(review): this class looks machine-mangled — every method is named
    ``snake_case_`` (so later definitions shadow earlier ones), every local is
    named ``snake_case`` (so each assignment overwrites the previous value),
    and the free name ``SCREAMING_SNAKE_CASE_`` is never defined.  The
    attributes the tests read (``self.dummy_cond_unet_upscale``,
    ``self.dummy_vae``, ``self.dummy_text_encoder``, ``self.dummy_image``)
    therefore cannot resolve as written; restore the original identifiers
    before running.
    """

    def snake_case_ ( self ):
        """Release Python and CUDA memory between tests (reads like ``tearDown``)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def snake_case_ ( self ):
        """Deterministic random image tensor fixture (intended shape: (1, 3, 32, 32))."""
        snake_case : int = 1
        snake_case : Dict = 3
        snake_case : List[Any] = (32, 32)
        # NOTE(review): `batch_size`, `num_channels`, `sizes`, `image` and the
        # target device are undefined here; the locals above presumably carried
        # those names before the rename.
        snake_case : Dict = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
        return image

    @property
    def snake_case_ ( self ):
        """Tiny conditional UNet sized for the upscale pipeline (7 input channels)."""
        torch.manual_seed(0 )
        snake_case : int = UNetaDConditionModel(
            block_out_channels=(32, 32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=7 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,attention_head_dim=8 ,use_linear_projection=SCREAMING_SNAKE_CASE_ ,only_cross_attention=(True, True, False) ,num_class_embeds=100 ,)
        return model

    @property
    def snake_case_ ( self ):
        """Tiny ``AutoencoderKL`` (VAE) fixture."""
        torch.manual_seed(0 )
        snake_case : str = AutoencoderKL(
            block_out_channels=[32, 32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
        return model

    @property
    def snake_case_ ( self ):
        """Tiny CLIP text encoder fixture."""
        torch.manual_seed(0 )
        snake_case : Dict = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act="""gelu""" ,projection_dim=512 ,)
        return CLIPTextModel(SCREAMING_SNAKE_CASE_ )

    def snake_case_ ( self ):
        """End-to-end smoke test: 4x upscaled output shape, a pinned pixel slice,
        and agreement between the dict-return and tuple-return code paths."""
        snake_case : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
        snake_case : Any = self.dummy_cond_unet_upscale
        snake_case : Optional[Any] = DDPMScheduler()
        snake_case : Optional[Any] = DDIMScheduler(prediction_type="""v_prediction""" )
        snake_case : Any = self.dummy_vae
        snake_case : Any = self.dummy_text_encoder
        snake_case : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        snake_case : Any = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
        snake_case : int = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE_ ) ).convert("""RGB""" ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        snake_case : Tuple = StableDiffusionUpscalePipeline(
            unet=SCREAMING_SNAKE_CASE_ ,low_res_scheduler=SCREAMING_SNAKE_CASE_ ,scheduler=SCREAMING_SNAKE_CASE_ ,vae=SCREAMING_SNAKE_CASE_ ,text_encoder=SCREAMING_SNAKE_CASE_ ,tokenizer=SCREAMING_SNAKE_CASE_ ,max_noise_level=350 ,)
        snake_case : str = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
        sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        snake_case : Optional[int] = """A painting of a squirrel eating a burger"""
        snake_case : Optional[int] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
        snake_case : Any = sd_pipe(
            [prompt] ,image=SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,)
        snake_case : Optional[int] = output.images
        # Re-run with the same seed through the tuple-return path; results must match.
        snake_case : Dict = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
        snake_case : List[Any] = sd_pipe(
            [prompt] ,image=SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=SCREAMING_SNAKE_CASE_ ,)[0]
        snake_case : str = image[0, -3:, -3:, -1]
        snake_case : List[str] = image_from_tuple[0, -3:, -3:, -1]
        snake_case : Optional[Any] = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        snake_case : int = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def snake_case_ ( self ):
        """Batching test: two prompts / two images, and ``num_images_per_prompt=2``."""
        snake_case : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
        snake_case : Any = self.dummy_cond_unet_upscale
        snake_case : List[str] = DDPMScheduler()
        snake_case : Optional[int] = DDIMScheduler(prediction_type="""v_prediction""" )
        snake_case : str = self.dummy_vae
        snake_case : List[str] = self.dummy_text_encoder
        snake_case : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        snake_case : Optional[int] = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
        snake_case : Tuple = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE_ ) ).convert("""RGB""" ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        snake_case : str = StableDiffusionUpscalePipeline(
            unet=SCREAMING_SNAKE_CASE_ ,low_res_scheduler=SCREAMING_SNAKE_CASE_ ,scheduler=SCREAMING_SNAKE_CASE_ ,vae=SCREAMING_SNAKE_CASE_ ,text_encoder=SCREAMING_SNAKE_CASE_ ,tokenizer=SCREAMING_SNAKE_CASE_ ,max_noise_level=350 ,)
        snake_case : str = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
        sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        snake_case : Tuple = """A painting of a squirrel eating a burger"""
        snake_case : List[Any] = sd_pipe(
            2 * [prompt] ,image=2 * [low_res_image] ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,)
        snake_case : Any = output.images
        assert image.shape[0] == 2
        snake_case : Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
        snake_case : Dict = sd_pipe(
            [prompt] ,image=SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,num_images_per_prompt=2 ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,)
        snake_case : Tuple = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
    def snake_case_ ( self ):
        """fp16 smoke test on GPU: only checks the output resolution (4x input)."""
        snake_case : int = self.dummy_cond_unet_upscale
        snake_case : Dict = DDPMScheduler()
        snake_case : Any = DDIMScheduler(prediction_type="""v_prediction""" )
        snake_case : str = self.dummy_vae
        snake_case : Dict = self.dummy_text_encoder
        snake_case : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        snake_case : List[str] = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
        snake_case : Optional[int] = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE_ ) ).convert("""RGB""" ).resize((64, 64) )
        # put models in fp16, except vae as it overflows in fp16
        snake_case : Any = unet.half()
        snake_case : Tuple = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        snake_case : str = StableDiffusionUpscalePipeline(
            unet=SCREAMING_SNAKE_CASE_ ,low_res_scheduler=SCREAMING_SNAKE_CASE_ ,scheduler=SCREAMING_SNAKE_CASE_ ,vae=SCREAMING_SNAKE_CASE_ ,text_encoder=SCREAMING_SNAKE_CASE_ ,tokenizer=SCREAMING_SNAKE_CASE_ ,max_noise_level=350 ,)
        snake_case : Optional[int] = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
        sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        snake_case : Any = """A painting of a squirrel eating a burger"""
        snake_case : Optional[Any] = torch.manual_seed(0 )
        snake_case : int = sd_pipe(
            [prompt] ,image=SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,num_inference_steps=2 ,output_type="""np""" ,).images
        snake_case : Optional[Any] = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
    """Slow GPU integration tests for the x4 upscaler against stored references.

    NOTE(review): same mechanical damage as the fast test class above — all
    methods share the name ``snake_case_`` and every local is ``snake_case``,
    so names such as ``pipe``, ``expected_image``, ``prompt`` and ``image``
    are undefined as written.
    """

    def snake_case_ ( self ):
        """Release Python and CUDA memory between tests (reads like ``tearDown``)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case_ ( self ):
        """Full-precision run against the stored reference output (tolerance 1e-3)."""
        snake_case : Dict = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-upscale/low_res_cat.png""" )
        snake_case : List[str] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
            """/upsampled_cat.npy""" )
        snake_case : str = """stabilityai/stable-diffusion-x4-upscaler"""
        snake_case : Union[str, Any] = StableDiffusionUpscalePipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
        pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        pipe.enable_attention_slicing()
        snake_case : str = """a cat sitting on a park bench"""
        snake_case : Optional[Any] = torch.manual_seed(0 )
        snake_case : Union[str, Any] = pipe(
            prompt=SCREAMING_SNAKE_CASE_ ,image=SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,output_type="""np""" ,)
        snake_case : str = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1E-3

    def snake_case_ ( self ):
        """fp16 run against the fp16 reference output (looser tolerance, 5e-1)."""
        snake_case : Optional[int] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-upscale/low_res_cat.png""" )
        snake_case : List[Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
            """/upsampled_cat_fp16.npy""" )
        snake_case : str = """stabilityai/stable-diffusion-x4-upscaler"""
        snake_case : int = StableDiffusionUpscalePipeline.from_pretrained(
            SCREAMING_SNAKE_CASE_ ,torch_dtype=torch.floataa ,)
        pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        pipe.enable_attention_slicing()
        snake_case : List[str] = """a cat sitting on a park bench"""
        snake_case : Tuple = torch.manual_seed(0 )
        snake_case : List[Any] = pipe(
            prompt=SCREAMING_SNAKE_CASE_ ,image=SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,output_type="""np""" ,)
        snake_case : Optional[int] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5E-1

    def snake_case_ ( self ):
        """Memory test: fp16 + attention slicing + sequential CPU offload must stay under ~2.9 GB."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        snake_case : List[str] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-upscale/low_res_cat.png""" )
        snake_case : str = """stabilityai/stable-diffusion-x4-upscaler"""
        snake_case : Union[str, Any] = StableDiffusionUpscalePipeline.from_pretrained(
            SCREAMING_SNAKE_CASE_ ,torch_dtype=torch.floataa ,)
        pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        snake_case : Any = """a cat sitting on a park bench"""
        snake_case : Union[str, Any] = torch.manual_seed(0 )
        snake_case : List[Any] = pipe(
            prompt=SCREAMING_SNAKE_CASE_ ,image=SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,num_inference_steps=5 ,output_type="""np""" ,)
        snake_case : Any = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 36
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class A__ ( unittest.TestCase ):
    """Config/inputs factory for the Flax RoBERTa-PreLayerNorm model tests.

    NOTE(review): mechanically damaged — ``__init__`` declares every parameter
    as ``SCREAMING_SNAKE_CASE`` (duplicate argument names are a SyntaxError),
    its body assigns everything to the throwaway local ``_a`` instead of
    ``self.<attr>`` (while later methods read ``self.batch_size`` etc.), and
    all three helper methods share the name ``__UpperCAmelCase``.  Restore
    distinct identifiers before use.
    """

    def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Any=1_3 , SCREAMING_SNAKE_CASE :Any=7 , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :Optional[int]=True , SCREAMING_SNAKE_CASE :List[str]=True , SCREAMING_SNAKE_CASE :Optional[Any]=9_9 , SCREAMING_SNAKE_CASE :Tuple=3_2 , SCREAMING_SNAKE_CASE :Union[str, Any]=5 , SCREAMING_SNAKE_CASE :List[str]=4 , SCREAMING_SNAKE_CASE :int=3_7 , SCREAMING_SNAKE_CASE :Optional[Any]="gelu" , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=0.1 , SCREAMING_SNAKE_CASE :Dict=5_1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_6 , SCREAMING_SNAKE_CASE :Union[str, Any]=2 , SCREAMING_SNAKE_CASE :List[Any]=0.02 , SCREAMING_SNAKE_CASE :int=4 , ) -> Tuple:
        """Store tester hyper-parameters (tiny model sizes for fast tests).

        NOTE(review): the RHS names below (parent, batch_size, ...) are the
        parameters this constructor was meant to take; as written they are
        undefined.
        """
        _a : Optional[Any] =parent
        _a : List[str] =batch_size
        _a : List[str] =seq_length
        _a : List[Any] =is_training
        _a : Optional[int] =use_attention_mask
        _a : List[Any] =use_token_type_ids
        _a : List[Any] =use_labels
        _a : Optional[Any] =vocab_size
        _a : str =hidden_size
        _a : List[Any] =num_hidden_layers
        _a : List[Any] =num_attention_heads
        _a : Union[str, Any] =intermediate_size
        _a : int =hidden_act
        _a : List[str] =hidden_dropout_prob
        _a : Optional[int] =attention_probs_dropout_prob
        _a : Dict =max_position_embeddings
        _a : Any =type_vocab_size
        _a : str =type_sequence_label_size
        _a : str =initializer_range
        _a : List[str] =num_choices

    def __UpperCAmelCase ( self :Union[str, Any] ) -> Dict:
        """Build (config, input_ids, token_type_ids, attention_mask).

        Reads like the conventional ``prepare_config_and_inputs`` (it is
        called under that name below).
        """
        _a : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _a : Dict =None
        if self.use_attention_mask:
            _a : Any =random_attention_mask([self.batch_size, self.seq_length] )
        _a : Optional[int] =None
        if self.use_token_type_ids:
            _a : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _a : Union[str, Any] =RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def __UpperCAmelCase ( self :Optional[Any] ) -> int:
        """Repackage the inputs as the kwargs dict used by the common test mixin."""
        _a : Tuple =self.prepare_config_and_inputs()
        _a , _a , _a , _a : List[Any] =config_and_inputs
        _a : Optional[int] ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict

    def __UpperCAmelCase ( self :int ) -> str:
        """Variant for decoder tests: adds encoder hidden states and attention mask."""
        _a : List[Any] =self.prepare_config_and_inputs()
        _a , _a , _a , _a : Optional[int] =config_and_inputs
        _a : Tuple =True
        _a : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        _a : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class A__ ( UpperCAmelCase__ , unittest.TestCase ):
    """Standard Flax model-test suite for RoBERTa-PreLayerNorm.

    NOTE(review): ``UpperCAmelCase__`` (the test mixin base) and
    ``FlaxRobertaPreLayerNormModelTester`` are undefined in this file — the
    tester class above was renamed by the same refactoring damage.  The two
    class attributes below also share one name, so the second shadows the
    first.
    """

    __UpperCamelCase : Union[str, Any] = True
    __UpperCamelCase : Dict = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def __UpperCAmelCase ( self :List[str] ) -> Optional[int]:
        """Attach the config/inputs factory (reads like ``setUp``)."""
        _a : Union[str, Any] =FlaxRobertaPreLayerNormModelTester(self )

    @slow
    def __UpperCAmelCase ( self :str ) -> int:
        """Load each model class from the hub and run a 1x1 forward pass."""
        for model_class_name in self.all_model_classes:
            _a : Optional[int] =model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE )
            _a : Dict =model(np.ones((1, 1) ) )
            self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@require_flax
class A__ ( unittest.TestCase ):
    """Slow integration tests pinning hub-checkpoint outputs to reference slices.

    NOTE(review): both test methods share the name ``__UpperCAmelCase`` (the
    second shadows the first) and the free name ``SCREAMING_SNAKE_CASE`` is
    undefined throughout — presumably distinct locals before the rename.
    """

    @slow
    def __UpperCAmelCase ( self :Any ) -> str:
        """Masked-LM head: check the logits shape and a pinned 3x3 slice."""
        _a : str =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE )
        _a : List[Any] =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
        _a : Dict =model(SCREAMING_SNAKE_CASE )[0]
        _a : List[Any] =[1, 1_1, 5_0_2_6_5]
        self.assertEqual(list(output.shape ) , SCREAMING_SNAKE_CASE )
        # compare the actual values for a slice.
        _a : Any =np.array(
            [[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )

    @slow
    def __UpperCAmelCase ( self :int ) -> int:
        """Base model: check a pinned 3x3 slice of the last hidden state."""
        _a : Union[str, Any] =FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE )
        _a : Any =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
        _a : Optional[int] =model(SCREAMING_SNAKE_CASE )[0]
        # compare the actual values for a slice.
        _a : str =np.array(
            [[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 694
| 0
|
"""Lazy import structure for the Longformer model family (configuration,
tokenizers, PyTorch and TensorFlow model classes).

Fix vs. original: every optional-backend structure was bound to the same
module-level name, clobbering the dict that ``_LazyModule`` needs, and the
final lazy-module object was bound to a throwaway name instead of being
installed in ``sys.modules``.
"""

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

# Register optional sub-modules only when their backend is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 464
|
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt , class_data_dir , num_class_images ):
    """Download `num_class_images` regularization images matching `class_prompt`
    from the LAION-400M KNN service into `class_data_dir`.

    Writes images to ``{class_data_dir}/images/`` plus three index files
    (``caption.txt``, ``urls.txt``, ``images.txt``).

    Fixes vs. original: the function shared its (duplicate-SyntaxError)
    parameter names and its name with ``parse_args`` below; identifiers are
    recovered from the ``retrieve(...)`` call site and body usage, and the
    single-name locals are given distinct names so values are no longer
    clobbered.
    """
    factor = 1.5
    num_images = int(factor * num_class_images )
    client = ClipClient(
        url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=num_images , aesthetic_weight=0.1 )
    os.makedirs(f"""{class_data_dir}/images""" , exist_ok=True )
    # Nothing to do if enough images were already downloaded.
    if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
        return
    # Grow the query size until the service returns enough candidates
    # (capped at 10k results per query).
    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=num_images , aesthetic_weight=0.1 , )
    count = 0
    total = 0
    pbar = tqdm(desc='''downloading real regularization images''' , total=num_class_images )
    with open(f"""{class_data_dir}/caption.txt""" , '''w''' ) as fa, open(f"""{class_data_dir}/urls.txt""" , '''w''' ) as fa_urls, open(
        f"""{class_data_dir}/images.txt""" , '''w''' ) as fa_paths:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['''url'''] )
                if img.status_code == 200:
                    # Validate the payload is a decodable image before saving.
                    Image.open(BytesIO(img.content ) )
                    with open(f"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f:
                        f.write(img.content )
                    fa.write(images['''caption'''] + '''\n''' )
                    fa_urls.write(images['''url'''] + '''\n''' )
                    fa_paths.write(f"""{class_data_dir}/images/{total}.jpg""" + '''\n''' )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                # Best-effort download: skip broken URLs/images.
                continue
    return
def parse_args():
    """Build and parse the CLI arguments for the retrieval script.

    Fix vs. original: the function shared its name with ``retrieve`` (so it
    shadowed it) and used an undefined name for the ``add_help`` / ``required``
    / ``type`` values; it is renamed to match its call site and given the
    conventional argparse values.
    """
    parser = argparse.ArgumentParser('''''' , add_help=False )
    parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=True , type=str )
    parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=True , type=str )
    parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=200 , type=int )
    return parser.parse_args()
if __name__ == "__main__":
    # Fix vs. original: the parsed namespace was bound to a throwaway name but
    # read back as `args`, which raised NameError.
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 464
| 1
|
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCamelCase = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
lowerCamelCase = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
lowerCamelCase = r"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
    """Accuracy metric for the MATH dataset (canonicalizes LaTeX before comparing).

    Fixes vs. original: both methods carried the class's obfuscated name
    (shadowing each other) instead of the ``_info`` / ``_compute`` hooks the
    ``datasets.Metric`` machinery calls, and ``_compute`` accumulated into an
    undefined name.
    """

    def _info( self ):
        """Declare the metric's signature (string predictions/references)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" ),
                    "references": datasets.Value("string" ),
                } ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )

    def _compute( self , predictions , references ):
        """Return canonicalized accuracy over paired predictions/references.

        Note: divides by ``len(predictions)``; callers must pass equal-length
        sequences (``zip`` silently truncates otherwise).
        """
        n_correct = 0.0
        for pred, ref in zip(predictions , references ):
            n_correct += 1.0 if math_equivalence.is_equiv(pred , ref ) else 0.0
        accuracy = n_correct / len(predictions )
        return {
            "accuracy": accuracy,
        }
| 82
|
"""simple docstring"""
def greatest_common_divisor(x , y ):
    """Euclidean GCD of two non-negative integers.

    Fix vs. original: the three functions in this section all shared one name
    (shadowing each other) and declared duplicate parameter names (a
    SyntaxError); identifiers are restored from the internal call sites.
    """
    return x if y == 0 else greatest_common_divisor(y , x % y )


def lcm(x , y ):
    """Least common multiple via the GCD identity: lcm(x, y) = x*y / gcd(x, y)."""
    return (x * y) // greatest_common_divisor(x , y )


def solution(n = 20 ):
    """Project Euler 5: smallest positive number evenly divisible by all of 1..n."""
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g


if __name__ == "__main__":
    print(F"{solution() = }")
| 82
| 1
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput( BaseOutput ):
    """Output of the VQ model's ``encode``: the (un-quantized) latents.

    Fixes vs. original: the class name is restored to match its use at the
    encode call site (``VQEncoderOutput(latents=...)``), the base class to the
    imported ``BaseOutput``, and the body's ``__magic_name__ = 42`` refactoring
    damage to the annotated ``latents`` field.
    """

    # assumes latents is a torch.FloatTensor of encoded latents — TODO confirm
    latents: torch.FloatTensor
class VQModel( ModelMixin , ConfigMixin ):
    """VQ-VAE model: Encoder -> (1x1 conv) -> VectorQuantizer -> (1x1 conv) -> Decoder.

    Reconstruction notes: the original declared every ``__init__`` parameter
    with the same name (a SyntaxError), used ``nn.Convad`` (which does not
    exist in ``torch.nn``; ``nn.Conv2d`` restored), shared one name with the
    output dataclass above, and bound the tuple returned by ``quantize`` to a
    single name.  Parameter names/defaults follow the visible defaults and
    usage; confirm against upstream diffusers.
    """

    @register_to_config
    def __init__( self , in_channels = 3 , out_channels = 3 , down_block_types = ("DownEncoderBlock2D",) , up_block_types = ("UpDecoderBlock2D",) , block_out_channels = (64,) , layers_per_block = 1 , act_fn = "silu" , latent_channels = 3 , sample_size = 32 , num_vq_embeddings = 256 , norm_num_groups = 32 , vq_embed_dim = None , scaling_factor = 0.1_82_15 , norm_type = "group" , ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1 )
        self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False )
        self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1 )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )

    @apply_forward_hook
    def encode( self , x , return_dict = True ):
        """Encode `x` to un-quantized latents; returns a plain tuple when return_dict=False."""
        h = self.encoder(x )
        h = self.quant_conv(h )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h )

    @apply_forward_hook
    def decode( self , h , force_not_quantize = False , return_dict = True ):
        """Quantize (unless disabled) and decode latents back to sample space."""
        if not force_not_quantize:
            # VectorQuantizer returns (quantized, loss, info); only the codes are needed here.
            quant, _, _ = self.quantize(h )
        else:
            quant = h
        hidden = self.post_quant_conv(quant )
        dec = self.decoder(hidden , quant if self.config.norm_type == '''spatial''' else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )

    def forward( self , sample , return_dict = True ):
        """Full encode -> quantize -> decode round trip on `sample`."""
        x = sample
        h = self.encode(x ).latents
        dec = self.decode(h ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
| 709
|
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(fnc , x_start , x_end , steps = 1_0_0 , ) -> float:
    """Approximate the area between ``fnc`` and the x-axis on [x_start, x_end]
    with the composite trapezoidal rule.

    Args:
        fnc: integrand, a callable of one float.
        x_start: left end of the interval.
        x_end: right end of the interval.
        steps: number of equal-width trapezoids (more steps = better accuracy).

    Fix vs. original: all parameters were declared with the same name (a
    SyntaxError) and the function name no longer matched the demo's call
    ``trapezoidal_area(f, -5, 5, i)``; identifiers are restored from that
    call site and the body's usage.
    """
    xa = x_start
    fxa = fnc(xa )
    area = 0.0
    for _ in range(steps ):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next )
        area += abs(fxa_next + fxa ) * (xa_next - xa) / 2
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return area
if __name__ == "__main__":
    # Demo: integrate x^3 + x^2 over [-5, 5] with increasing step counts.
    # Fix vs. original: `f`, `i` and `trapezoidal_area` were referenced but
    # did not exist under the obfuscated names.

    def f(x ):
        """Integrand used in the demo."""
        return x**3 + x**2

    print('f(x) = x^3 + x^2')
    print('The area between the curve, x = -5, x = 5 and the x axis is:')
    i = 1_0
    while i <= 1_0_0_0_0_0:
        print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
        i *= 1_0
| 236
| 0
|
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __UpperCamelCase ( _a ):
    """Tests that ``transformers`` works with TRANSFORMERS_OFFLINE=1 by launching
    small python programs in a subprocess (offline mode can only be toggled
    before ``transformers`` is imported, so it cannot be flipped inside pytest).

    NOTE(review): an automated rename collapsed every local into
    ``UpperCAmelCase__`` while later statements still read the original names
    (``load``, ``run``, ``mock``, ``env``, ``cmd``, ``result``) and call sites
    pass an undefined ``lowerCamelCase__``; the env-var writes such as
    ``env["TRANSFORMERS_OFFLINE"] = "1"`` were likewise reduced to bare
    assignments. These tests will raise NameError as written — restore the
    original bindings before relying on them.
    """
    @require_torch
    def _UpperCAmelCase ( self ):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        UpperCAmelCase__: Dict = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        UpperCAmelCase__: Union[str, Any] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        # The mock replaces socket.socket so any network access raises.
        UpperCAmelCase__: Dict = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        UpperCAmelCase__: str = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(lowerCamelCase__ )
        BertModel.from_pretrained(lowerCamelCase__ )
        BertTokenizer.from_pretrained(lowerCamelCase__ )
        pipeline(task="fill-mask" , model=lowerCamelCase__ )
        # baseline - just load from_pretrained with normal network
        UpperCAmelCase__: Tuple = [sys.executable, "-c", "\n".join([load, run, mock] )]
        # should succeed
        UpperCAmelCase__: List[str] = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        UpperCAmelCase__: Any = "1"
        UpperCAmelCase__: Optional[Any] = subprocess.run(lowerCamelCase__ , env=lowerCamelCase__ , check=lowerCamelCase__ , capture_output=lowerCamelCase__ )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
    @require_torch
    def _UpperCAmelCase ( self ):
        # Same as above, but the mock simulates flaky internet via socket.error
        # rather than a hard RuntimeError.
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        UpperCAmelCase__: Union[str, Any] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        UpperCAmelCase__: Union[str, Any] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        UpperCAmelCase__: Optional[Any] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        UpperCAmelCase__: List[Any] = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(lowerCamelCase__ )
        BertModel.from_pretrained(lowerCamelCase__ )
        BertTokenizer.from_pretrained(lowerCamelCase__ )
        pipeline(task="fill-mask" , model=lowerCamelCase__ )
        # baseline - just load from_pretrained with normal network
        UpperCAmelCase__: Dict = [sys.executable, "-c", "\n".join([load, run, mock] )]
        # should succeed
        UpperCAmelCase__: Dict = self.get_env()
        UpperCAmelCase__: Optional[int] = subprocess.run(lowerCamelCase__ , env=lowerCamelCase__ , check=lowerCamelCase__ , capture_output=lowerCamelCase__ )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
    @require_torch
    def _UpperCAmelCase ( self ):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        UpperCAmelCase__: Union[str, Any] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
        UpperCAmelCase__: int = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
        UpperCAmelCase__: Optional[int] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        # baseline - just load from_pretrained with normal network
        UpperCAmelCase__: Union[str, Any] = [sys.executable, "-c", "\n".join([load, run] )]
        # should succeed
        UpperCAmelCase__: Optional[int] = self.get_env()
        UpperCAmelCase__: List[Any] = subprocess.run(lowerCamelCase__ , env=lowerCamelCase__ , check=lowerCamelCase__ , capture_output=lowerCamelCase__ )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
        # next emulate no network
        UpperCAmelCase__: Dict = [sys.executable, "-c", "\n".join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        UpperCAmelCase__: List[Any] = "1"
        UpperCAmelCase__: List[str] = subprocess.run(lowerCamelCase__ , env=lowerCamelCase__ , check=lowerCamelCase__ , capture_output=lowerCamelCase__ )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
    @require_torch
    def _UpperCAmelCase ( self ):
        # Offline pipeline() without an explicit task should fail with a clear error.
        UpperCAmelCase__: int = "\nfrom transformers import pipeline\n "
        UpperCAmelCase__: Dict = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
        UpperCAmelCase__: Optional[int] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        UpperCAmelCase__: Tuple = self.get_env()
        UpperCAmelCase__: Dict = "1"
        UpperCAmelCase__: Optional[Any] = [sys.executable, "-c", "\n".join([load, mock, run] )]
        UpperCAmelCase__: int = subprocess.run(lowerCamelCase__ , env=lowerCamelCase__ , check=lowerCamelCase__ , capture_output=lowerCamelCase__ )
        # Non-zero exit is expected here: task inference needs the Hub.
        self.assertEqual(result.returncode , 1 , result.stderr )
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
    @require_torch
    def _UpperCAmelCase ( self ):
        # trust_remote_code models should also load from the cache when offline.
        UpperCAmelCase__: Any = "\nfrom transformers import AutoModel\n "
        UpperCAmelCase__: str = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
        # baseline - just load from_pretrained with normal network
        UpperCAmelCase__: Union[str, Any] = [sys.executable, "-c", "\n".join([load, run] )]
        # should succeed
        UpperCAmelCase__: List[str] = self.get_env()
        UpperCAmelCase__: int = subprocess.run(lowerCamelCase__ , env=lowerCamelCase__ , check=lowerCamelCase__ , capture_output=lowerCamelCase__ )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        UpperCAmelCase__: Optional[Any] = "1"
        UpperCAmelCase__: Any = subprocess.run(lowerCamelCase__ , env=lowerCamelCase__ , check=lowerCamelCase__ , capture_output=lowerCamelCase__ )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
| 113
|
# Doc-notebook configuration snippets (Italian locale).
# NOTE(review): the three constants below were all renamed to ``_lowerCAmelCase``
# by an automated tool, so each assignment overwrites the previous one, and the
# second assignment reads ``INSTALL_CONTENT`` — presumably the first constant's
# original name — which is undefined here. Restore distinct names before use.
_lowerCAmelCase : int ="""
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
# First notebook cell: the install snippet above.
_lowerCAmelCase : List[str] =[{"""type""": """code""", """content""": INSTALL_CONTENT}]
# Placeholder -> fake-class substitutions used when rendering doc templates.
_lowerCAmelCase : int ={
    """{processor_class}""": """FakeProcessorClass""",
    """{model_class}""": """FakeModelClass""",
    """{object_class}""": """FakeObjectClass""",
}
| 113
| 1
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def lowercase_ (A : Any ):
snake_case__ : Dict = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(lowercase_ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
snake_case__ : Optional[int] = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creates a copy of the matrix with swapped positions of the elements
snake_case__ : List[Any] = [[0.0, 0.0], [0.0, 0.0]]
snake_case__ : int = matrix[1][1], matrix[0][0]
snake_case__ : List[str] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(lowercase_ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(lowercase_ ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
snake_case__ : Optional[int] = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creating cofactor matrix
snake_case__ : Optional[Any] = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
snake_case__ : Optional[Any] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
snake_case__ : Optional[int] = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
snake_case__ : str = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
snake_case__ : int = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
snake_case__ : Optional[int] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
snake_case__ : List[str] = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
snake_case__ : Any = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
snake_case__ : Union[str, Any] = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
snake_case__ : List[str] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
snake_case__ : str = array(lowercase_ )
for i in range(3 ):
for j in range(3 ):
snake_case__ : List[str] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
snake_case__ : List[str] = array(lowercase_ )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(lowercase_ )
# Calculate the inverse of the matrix
return [[float(d(lowercase_ ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
| 707
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
# Lazy-import wiring for the GPT-NeoX-Japanese subpackage: submodule -> exported names.
# NOTE(review): the structure dict is bound to ``a_`` but ``_LazyModule`` below is
# called with ``_import_structure``, which is undefined here; in the upstream
# pattern the lazy module also replaces ``sys.modules[__name__]`` instead of
# being bound to a throwaway name — verify against the original file.
a_ :Dict = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
# The modeling exports are only advertised when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ :int = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]
# Static type checkers see the real imports; at runtime a lazy module defers them.
if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )
else:
    import sys
    a_ :Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 243
| 0
|
'''Directed test graphs (adjacency lists) for the strongly-connected-components
routines below.'''
# NOTE(review): both constants share the name ``lowerCAmelCase__``, so the second
# assignment shadows the first — the two test graphs need distinct names.
lowerCAmelCase__ : Any = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
lowerCAmelCase__ : List[str] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def _a ( __lowerCAmelCase : dict[int, list[int]] , __lowerCAmelCase : int , __lowerCAmelCase : list[bool] ):
"""simple docstring"""
snake_case__ : Union[str, Any] = True
snake_case__ : List[str] = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
order.append(__lowerCAmelCase )
return order
def _a ( __lowerCAmelCase : dict[int, list[int]] , __lowerCAmelCase : int , __lowerCAmelCase : list[bool] ):
"""simple docstring"""
snake_case__ : List[str] = True
snake_case__ : Optional[Any] = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return component
def _a ( __lowerCAmelCase : dict[int, list[int]] ):
"""simple docstring"""
snake_case__ : List[str] = len(__lowerCAmelCase ) * [False]
snake_case__ : dict[int, list[int]] = {vert: [] for vert in range(len(__lowerCAmelCase ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(__lowerCAmelCase )
snake_case__ : Optional[Any] = []
for i, was_visited in enumerate(__lowerCAmelCase ):
if not was_visited:
order += topology_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
snake_case__ : List[str] = []
snake_case__ : List[str] = len(__lowerCAmelCase ) * [False]
for i in range(len(__lowerCAmelCase ) ):
snake_case__ : Optional[int] = order[len(__lowerCAmelCase ) - i - 1]
if not visited[vert]:
snake_case__ : Tuple = find_components(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
components_list.append(__lowerCAmelCase )
return components_list
| 347
|
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class a ( SCREAMING_SNAKE_CASE ):
    """Builds tiny DeBERTa-v2 configs/inputs and per-head checks for the test
    suite below.

    NOTE(review): an automated rename collapsed every local into
    ``snake_case__``: ``__init__`` binds its arguments to that one local instead
    of ``self.*`` (yet other methods read ``self.batch_size`` etc.), and the
    check methods build a model into ``snake_case__`` but then read the
    original names (``model``, ``result``, ``sequence_output``), which will
    raise NameError. Restore the original bindings before running.
    """
    def __init__( self : Tuple , snake_case_ : int , snake_case_ : Union[str, Any]=1_3 , snake_case_ : Optional[Any]=7 , snake_case_ : List[str]=True , snake_case_ : Optional[int]=True , snake_case_ : Optional[int]=True , snake_case_ : List[str]=True , snake_case_ : Optional[Any]=9_9 , snake_case_ : int=3_2 , snake_case_ : str=5 , snake_case_ : int=4 , snake_case_ : List[str]=3_7 , snake_case_ : Tuple="gelu" , snake_case_ : List[str]=0.1 , snake_case_ : List[Any]=0.1 , snake_case_ : Union[str, Any]=5_1_2 , snake_case_ : Union[str, Any]=1_6 , snake_case_ : List[Any]=2 , snake_case_ : List[Any]=0.0_2 , snake_case_ : Any=False , snake_case_ : List[Any]=True , snake_case_ : Optional[Any]="None" , snake_case_ : Dict=3 , snake_case_ : Optional[int]=4 , snake_case_ : Any=None , ):
        '''Store the (tiny) model hyper-parameters for later config creation.'''
        snake_case__ : Union[str, Any] = parent
        snake_case__ : Any = batch_size
        snake_case__ : List[Any] = seq_length
        snake_case__ : Optional[int] = is_training
        snake_case__ : Dict = use_input_mask
        snake_case__ : List[str] = use_token_type_ids
        snake_case__ : int = use_labels
        snake_case__ : int = vocab_size
        snake_case__ : Optional[Any] = hidden_size
        snake_case__ : int = num_hidden_layers
        snake_case__ : Optional[Any] = num_attention_heads
        snake_case__ : Optional[Any] = intermediate_size
        snake_case__ : Optional[Any] = hidden_act
        snake_case__ : List[str] = hidden_dropout_prob
        snake_case__ : Optional[Any] = attention_probs_dropout_prob
        snake_case__ : int = max_position_embeddings
        snake_case__ : List[str] = type_vocab_size
        snake_case__ : Union[str, Any] = type_sequence_label_size
        snake_case__ : List[Any] = initializer_range
        snake_case__ : Any = num_labels
        snake_case__ : Tuple = num_choices
        snake_case__ : List[str] = relative_attention
        snake_case__ : Optional[int] = position_biased_input
        snake_case__ : Union[str, Any] = pos_att_type
        snake_case__ : Optional[Any] = scope
    def __magic_name__ ( self : Optional[int] ):
        '''Create random input ids/masks/labels plus a config for one test run.'''
        snake_case__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case__ : Union[str, Any] = None
        if self.use_input_mask:
            snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        snake_case__ : int = None
        if self.use_token_type_ids:
            snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        snake_case__ : List[str] = None
        snake_case__ : Tuple = None
        snake_case__ : Any = None
        if self.use_labels:
            snake_case__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            snake_case__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            snake_case__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
        snake_case__ : Union[str, Any] = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __magic_name__ ( self : Tuple ):
        '''Build a DebertaVaConfig from the stored hyper-parameters.'''
        return DebertaVaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def __magic_name__ ( self : Union[str, Any] , snake_case_ : int ):
        '''Assert that the loss is a scalar.'''
        self.parent.assertListEqual(list(result.loss.size() ) , [] )
    def __magic_name__ ( self : List[str] , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] ):
        '''Check the base model's output shape under three input combinations.'''
        snake_case__ : List[Any] = DebertaVaModel(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        snake_case__ : Tuple = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )[0]
        snake_case__ : Optional[int] = model(snake_case_ , token_type_ids=snake_case_ )[0]
        snake_case__ : Dict = model(snake_case_ )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
    def __magic_name__ ( self : Dict , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : Optional[int] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : Union[str, Any] ):
        '''Check the masked-LM head's logits shape.'''
        snake_case__ : Tuple = DebertaVaForMaskedLM(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        snake_case__ : Dict = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __magic_name__ ( self : str , snake_case_ : Optional[Any] , snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : str , snake_case_ : Tuple ):
        '''Check the sequence-classification head's logits shape and loss.'''
        snake_case__ : Union[str, Any] = self.num_labels
        snake_case__ : Optional[int] = DebertaVaForSequenceClassification(snake_case_ )
        model.to(snake_case_ )
        model.eval()
        snake_case__ : Tuple = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(snake_case_ )
    def __magic_name__ ( self : Optional[Any] , snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : Dict , snake_case_ : Any ):
        '''Check the token-classification head's logits shape.'''
        snake_case__ : List[Any] = self.num_labels
        snake_case__ : Any = DebertaVaForTokenClassification(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        snake_case__ : Union[str, Any] = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def __magic_name__ ( self : Any , snake_case_ : Union[str, Any] , snake_case_ : Dict , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : List[str] ):
        '''Check the question-answering head's start/end logits shapes.'''
        snake_case__ : Union[str, Any] = DebertaVaForQuestionAnswering(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        snake_case__ : List[str] = model(
            snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def __magic_name__ ( self : int , snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : str , snake_case_ : Optional[Any] ):
        '''Check the multiple-choice head's logits shape (inputs are expanded
        along a num_choices dimension).'''
        snake_case__ : Union[str, Any] = DebertaVaForMultipleChoice(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        snake_case__ : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        snake_case__ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        snake_case__ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        snake_case__ : int = model(
            snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def __magic_name__ ( self : int ):
        '''Return (config, inputs_dict) for the common model tests.'''
        snake_case__ : Dict = self.prepare_config_and_inputs()
        (
            (
                snake_case__
            ) , (
                snake_case__
            ) , (
                snake_case__
            ) , (
                snake_case__
            ) , (
                snake_case__
            ) , (
                snake_case__
            ) , (
                snake_case__
            ) ,
        ) : Dict = config_and_inputs
        snake_case__ : Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Common + pipeline model tests for the DeBERTa-v2 family.

    NOTE(review): the class attributes below were all renamed to
    ``__UpperCAmelCase`` (originally ``all_model_classes``,
    ``pipeline_model_mapping`` and several boolean flags), so each assignment
    shadows the previous one and the mixins cannot find the expected names.
    """
    __UpperCAmelCase = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    __UpperCAmelCase = (
        {
            """feature-extraction""": DebertaVaModel,
            """fill-mask""": DebertaVaForMaskedLM,
            """question-answering""": DebertaVaForQuestionAnswering,
            """text-classification""": DebertaVaForSequenceClassification,
            """token-classification""": DebertaVaForTokenClassification,
            """zero-shot""": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __UpperCAmelCase = True
    __UpperCAmelCase = False
    __UpperCAmelCase = False
    __UpperCAmelCase = False
    __UpperCAmelCase = False
    def __magic_name__ ( self : List[Any] ):
        # Per-test fixtures: the tester builds tiny configs/inputs; the config
        # tester runs the shared config sanity checks.
        snake_case__ : int = DebertaVaModelTester(self )
        snake_case__ : Optional[Any] = ConfigTester(self , config_class=snake_case_ , hidden_size=3_7 )
    def __magic_name__ ( self : int ):
        '''Run the shared config sanity checks.'''
        self.config_tester.run_common_tests()
    def __magic_name__ ( self : Tuple ):
        '''Base model forward-shape check.'''
        snake_case__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*snake_case_ )
    def __magic_name__ ( self : Any ):
        '''Sequence-classification head check.'''
        snake_case__ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case_ )
    def __magic_name__ ( self : Optional[Any] ):
        '''Masked-LM head check.'''
        snake_case__ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case_ )
    def __magic_name__ ( self : Dict ):
        '''Question-answering head check.'''
        snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*snake_case_ )
    def __magic_name__ ( self : Optional[Any] ):
        '''Token-classification head check.'''
        snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*snake_case_ )
    def __magic_name__ ( self : Dict ):
        '''Multiple-choice head check.'''
        snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*snake_case_ )
    @slow
    def __magic_name__ ( self : List[str] ):
        '''Smoke-test loading the first pretrained checkpoint from the Hub.'''
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case__ : int = DebertaVaModel.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
    """Integration tests against the real ``microsoft/deberta-v2-xlarge``
    checkpoint (slow; requires network/cache)."""
    @unittest.skip(reason='''Model not available yet''' )
    def __magic_name__ ( self : str ):
        '''Placeholder for a head-specific integration test.'''
        pass
    @slow
    def __magic_name__ ( self : List[str] ):
        '''Compare a slice of the base model's hidden states to golden values.'''
        snake_case__ : Optional[int] = DebertaVaModel.from_pretrained('''microsoft/deberta-v2-xlarge''' )
        snake_case__ : Tuple = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
        snake_case__ : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            snake_case__ : Optional[Any] = model(snake_case_ , attention_mask=snake_case_ )[0]
        # compare the actual values for a slice.
        snake_case__ : Dict = torch.tensor(
            [[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case_ , atol=1e-4 ) , F"""{output[:, 1:4, 1:4]}""" )
| 347
| 1
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def lowerCamelCase_ ( lowerCAmelCase__ : List[Any] ) -> Dict:
    '''Build a Swin-backbone UperNet config for the given checkpoint name.

    NOTE(review): every distinct local (auxiliary channels, window size,
    embed dim, depths, num_heads, ...) was collapsed into the single name
    ``A`` by an automated rename, so each assignment overwrites the previous
    one; the body also reads ``model_name`` (the parameter is named
    ``lowerCAmelCase__``) and applies ``int()`` to the function argument where
    the dict *key* was presumably intended. ``UperNetConfig`` is given
    ``idalabel=``/``labelaid=`` keyword names that look mangled from
    ``id2label``/``label2id``. Restore the original names before use.
    '''
    A = 384
    A = 7
    if "tiny" in model_name:
        A = 96
        A = (2, 2, 6, 2)
        A = (3, 6, 12, 24)
    elif "small" in model_name:
        A = 96
        A = (2, 2, 18, 2)
        A = (3, 6, 12, 24)
    elif "base" in model_name:
        A = 128
        A = (2, 2, 18, 2)
        A = (4, 8, 16, 32)
        A = 12
        A = 512
    elif "large" in model_name:
        A = 192
        A = (2, 2, 18, 2)
        A = (6, 12, 24, 48)
        A = 12
        A = 768
    # set label information
    A = 150
    A = 'huggingface/label-files'
    A = 'ade20k-id2label.json'
    A = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type='dataset' ) , 'r' ) )
    A = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
    A = {v: k for k, v in idalabel.items()}
    A = SwinConfig(
        embed_dim=lowerCAmelCase__ , depths=lowerCAmelCase__ , num_heads=lowerCAmelCase__ , window_size=lowerCAmelCase__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
    A = UperNetConfig(
        backbone_config=lowerCAmelCase__ , auxiliary_in_channels=lowerCAmelCase__ , num_labels=lowerCAmelCase__ , idalabel=lowerCAmelCase__ , labelaid=lowerCAmelCase__ , )
    return config
def lowerCamelCase_(config) -> list:
    '''Build the (old_key, new_key) pairs that map original Swin-UperNet
    checkpoint names to the HF UperNet naming scheme.

    Fixes the original, which initialised the accumulator into a throwaway
    name (``A = []``) while appending to the undefined ``rename_keys``, and
    whose parameter had been renamed away from ``config`` (the name the body
    actually reads).

    :param config: UperNet config whose ``backbone_config.depths`` drives the
        per-stage/per-block key generation
    :return: list of (source key, target key) tuples
    '''
    rename_keys = []
    # fmt: off
    # stem
    rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight'))
    rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias'))
    rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight'))
    rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias'))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight'))
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias'))
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table'))
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index'))
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight'))
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias'))
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight'))
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias'))
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight'))
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias'))
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight'))
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias'))
        if i < 3:
            # Patch-merging downsample exists between stages only (not after the last).
            rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight'))
            rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight'))
            rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias'))
        rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight'))
        rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias'))
    # decode head
    rename_keys.extend(
        [
            ('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
            ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
            ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
            ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
        ])
    # fmt: on
    return rename_keys
def lowerCamelCase_(dct: dict, old: str, new: str) -> None:
    """Move the entry stored under key ``old`` in ``dct`` to key ``new`` (in place).

    Raises:
        KeyError: if ``old`` is not present in ``dct``.
    """
    # The original (mangled) signature repeated one parameter name three times,
    # which is a SyntaxError; restored to the conventional (dct, old, new).
    val = dct.pop(old)
    dct[new] = val
def lowerCamelCase_(state_dict: dict, backbone_config) -> None:
    """Split each Swin block's fused ``qkv`` projection into separate
    query/key/value tensors, in place.

    The source checkpoint stores the attention input projection as one matrix
    plus one bias per block; the HF model expects distinct query/key/value
    entries. The fused tensor is laid out as rows ``[0:dim]`` = query,
    ``[dim:2*dim]`` = key, ``[-dim:]`` = value.

    Args:
        state_dict: checkpoint dict, modified in place (fused keys are popped).
        backbone_config: exposes ``embed_dim`` and ``depths`` (one entry per
            stage); stage ``i`` has hidden size ``embed_dim * 2**i``.
    """
    # NOTE(review): in the mangled source every slice was assigned to a
    # throw-away local `A`; restored target key names follow the upstream
    # HF Swin naming scheme — confirm against the model's state_dict.
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def lowerCamelCase_(x):
    """Undo the patch-merging column reordering of a 2D ``(out, in)`` weight.

    The input width must be divisible by 4. The columns are regrouped as
    ``(4, in//4)``, rows 1 and 2 of that grouping are swapped, and the result
    is flattened back to ``(out, in)``.
    """
    # NOTE(review): the mangled original unpacked both shape dims into one
    # local and then read undefined names; restored the two-dim unpack.
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def lowerCamelCase_(x):
    """Reorder the columns of a 2D ``(out, in)`` patch-merging weight.

    Groups the columns as ``(in//4, 4)``, swaps elements 1 and 2 inside each
    group of four, then flattens back to ``(out, in)``. Inverse of the
    companion ``reshape(out, 4, in//4)`` variant above.
    """
    # NOTE(review): restored from a mangled body that unpacked both shape dims
    # into a single local and referenced undefined `in_channel`/`x`.
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def lowerCamelCase_(x):
    """Undo the patch-merging reordering of a 1D norm parameter.

    Length must be divisible by 4: reshape to ``(4, n//4)``, swap rows 1 and 2,
    transpose, and flatten back to length ``n``.
    """
    # NOTE(review): restored — the mangled body read undefined `in_channel`.
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x
def lowerCamelCase_(x):
    """Reorder a 1D norm parameter for patch merging.

    Length must be divisible by 4: reshape to ``(n//4, 4)``, swap elements 1
    and 2 inside each group of four, transpose, and flatten back to ``n``.
    """
    # NOTE(review): restored — the mangled body read undefined `in_channel`.
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def lowerCamelCase_ ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[Any] ) -> Tuple:
    """Download an mmsegmentation UperNet+Swin checkpoint, convert it to the HF
    UperNetForSemanticSegmentation layout, verify logits on a sample ADE20k
    image, and optionally save and/or push the converted model.

    NOTE(review): this block is machine-mangled. The three parameters share one
    name (a SyntaxError), and every local result is assigned to `A` while later
    lines read the intended names (model_name, state_dict, config, model, ...).
    Restore distinct names before running.
    """
    # Map of supported model names to their original mmsegmentation checkpoints.
    A = {
        'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
        'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
        'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
        'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
    }
    A = model_name_to_url[model_name]
    # Fetch the original weights (only the 'state_dict' entry of the checkpoint).
    A = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location='cpu' , file_name=lowerCAmelCase__ )[
        'state_dict'
    ]
    for name, param in state_dict.items():
        print(lowerCAmelCase__ , param.shape )
    A = get_upernet_config(lowerCAmelCase__ )
    A = UperNetForSemanticSegmentation(lowerCAmelCase__ )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        A = state_dict.pop(lowerCAmelCase__ )
        if "bn" in key:
            A = key.replace('bn' , 'batch_norm' )
        A = val
    # rename keys
    A = create_rename_keys(lowerCAmelCase__ )
    for src, dest in rename_keys:
        rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
    # Split fused qkv projections into separate q/k/v tensors.
    read_in_q_k_v(lowerCAmelCase__ , config.backbone_config )
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                A = reverse_correct_unfold_reduction_order(lowerCAmelCase__ )
            if "norm" in key:
                A = reverse_correct_unfold_norm_order(lowerCAmelCase__ )
    model.load_state_dict(lowerCAmelCase__ )
    # verify on image
    A = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
    A = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert('RGB' )
    A = SegformerImageProcessor()
    A = processor(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
    with torch.no_grad():
        A = model(lowerCAmelCase__ )
    A = outputs.logits
    print(logits.shape )
    print('First values of logits:' , logits[0, 0, :3, :3] )
    # assert values
    # Per-model expected top-left logit patches used as a conversion sanity check.
    if model_name == "upernet-swin-tiny":
        A = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
    elif model_name == "upernet-swin-small":
        A = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
    elif model_name == "upernet-swin-base":
        A = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
    elif model_name == "upernet-swin-large":
        A = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
    print('Logits:' , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCAmelCase__ , atol=1E-4 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(lowerCAmelCase__ )
        print(F'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(lowerCAmelCase__ )
    if push_to_hub:
        print(F'''Pushing model and processor for {model_name} to hub''' )
        model.push_to_hub(F'''openmmlab/{model_name}''' )
        processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
    # CLI entry point for the checkpoint conversion script.
    # NOTE(review): the parser is assigned to `__snake_case` but subsequent
    # lines read `parser` / `args` — mangled names; restore before running.
    __snake_case :int =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='upernet-swin-tiny',
        type=str,
        choices=[F'''upernet-swin-{size}''' for size in ['tiny', 'small', 'base', 'large']],
        help='Name of the Swin + UperNet model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    __snake_case :Optional[Any] =parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 224
|
def lowerCamelCase_(inp: str, table) -> str:
    """Build a string by picking characters of ``inp`` at the 1-based positions
    listed in ``table`` (the permutation/expansion step of simplified DES).
    """
    # The original (mangled) signature repeated one parameter name, which is a
    # SyntaxError; also replaced quadratic ``+=`` concatenation with join.
    return "".join(inp[i - 1] for i in table)
def lowerCamelCase_ ( lowerCAmelCase__ : List[str] ) -> Union[str, Any]:
    """Rotate the sequence one position to the left: the first element is
    appended after the remaining elements."""
    head = lowerCAmelCase__[0]
    tail = lowerCAmelCase__[1:]
    return tail + head
def lowerCamelCase_(a: str, b: str) -> str:
    """Bitwise XOR of two equal-length bit strings, e.g. ``"1010" ^ "0110" ->
    "1100"``.

    Raises:
        IndexError: if ``b`` is shorter than ``a`` (preserved from the original
            index-based loop).
    """
    # The original (mangled) signature repeated one parameter name (SyntaxError);
    # also replaced quadratic ``+=`` concatenation with a single join.
    return "".join("0" if a[i] == b[i] else "1" for i in range(len(a)))
def lowerCamelCase_(s, data: str) -> str:
    """Look up a 4-bit block in S-box ``s`` (simplified DES).

    The outer bits (first and last) of ``data`` select the row, the middle two
    bits select the column; the table entry is returned as a binary string
    without the ``0b`` prefix (1 or 2 characters).
    """
    # Restored from a mangled definition whose duplicated parameter names were a
    # SyntaxError and whose body read undefined `s`/`data`.
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]
def lowerCamelCase_ ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] ) -> Union[str, Any]:
    """One Feistel round of simplified DES.

    NOTE(review): machine-mangled — all five parameters share one name (a
    SyntaxError), and the body calls helpers (`apply_table`, `xor`,
    `apply_sbox`) that are not defined under those names in this file. The
    original signature was presumably (expansion, s0, s1, key, message) with a
    global P4 table in the second `apply_table` call — restore before use.
    """
    # Split the 8-bit message into two 4-bit halves.
    A = message[:4]
    A = message[4:]
    # Expand/permute the right half, mix with the round key.
    A = apply_table(lowerCAmelCase__ , lowerCAmelCase__ )
    A = xor(lowerCAmelCase__ , lowerCAmelCase__ )
    # S-box substitution on each half of the mixed value.
    A = apply_sbox(lowerCAmelCase__ , temp[:4] )  # noqa: E741
    A = apply_sbox(lowerCAmelCase__ , temp[4:] )
    # Left-pad each S-box output to 2 bits.
    A = '0' * (2 - len(lowerCAmelCase__ )) + l  # noqa: E741
    A = '0' * (2 - len(lowerCAmelCase__ )) + r
    # P4 permutation, then XOR into the left half; right half passes through.
    A = apply_table(l + r , lowerCAmelCase__ )
    A = xor(lowerCAmelCase__ , lowerCAmelCase__ )
    return temp + right
if __name__ == "__main__":
    # Simplified-DES demo driver: derive two round keys from a 10-bit key, then
    # encrypt and decrypt an 8-bit message.
    # NOTE(review): machine-mangled — every assignment target is `__snake_case`,
    # so each line overwrites the previous value and later names (key, message,
    # left, right, temp, CT, PT, expansion, keya, sa, IP, IP_inv, ...) are
    # undefined. Restore distinct variable names before running.
    __snake_case :Tuple =input('Enter 10 bit key: ')
    __snake_case :Union[str, Any] =input('Enter 8 bit message: ')
    # Permutation tables (1-based indices) of the S-DES algorithm.
    __snake_case :int =[6, 3, 7, 4, 8, 5, 10, 9]
    __snake_case :Dict =[3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    __snake_case :Optional[int] =[2, 4, 3, 1]
    __snake_case :Tuple =[2, 6, 3, 1, 4, 8, 5, 7]
    __snake_case :Dict =[4, 1, 3, 5, 7, 2, 8, 6]
    __snake_case :str =[4, 1, 2, 3, 2, 3, 4, 1]
    # S-boxes S0 and S1.
    __snake_case :Optional[int] =[[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    __snake_case :Optional[Any] =[[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
    # key generation
    __snake_case :List[Any] =apply_table(key, paa_table)
    __snake_case :Tuple =temp[:5]
    __snake_case :int =temp[5:]
    __snake_case :int =left_shift(left)
    __snake_case :List[str] =left_shift(right)
    __snake_case :Tuple =apply_table(left + right, pa_table)
    __snake_case :List[Any] =left_shift(left)
    __snake_case :str =left_shift(right)
    __snake_case :Optional[Any] =left_shift(left)
    __snake_case :Optional[Any] =left_shift(right)
    __snake_case :Any =apply_table(left + right, pa_table)
    # encryption
    __snake_case :Optional[Any] =apply_table(message, IP)
    __snake_case :Optional[int] =function(expansion, sa, sa, keya, temp)
    __snake_case :Dict =temp[4:] + temp[:4]
    __snake_case :Any =function(expansion, sa, sa, keya, temp)
    __snake_case :Optional[int] =apply_table(temp, IP_inv)
    print('Cipher text is:', CT)
    # decryption
    __snake_case :str =apply_table(CT, IP)
    __snake_case :str =function(expansion, sa, sa, keya, temp)
    __snake_case :List[str] =temp[4:] + temp[:4]
    __snake_case :Tuple =function(expansion, sa, sa, keya, temp)
    __snake_case :Optional[int] =apply_table(temp, IP_inv)
    print('Plain text after decypting is:', PT)
| 224
| 1
|
def _A ( ) -> list[list[int]]:
"""simple docstring"""
return [list(range(10_00 - i , -10_00 - i , -1 ) ) for i in range(10_00 )]
# NOTE(review): `generate_large_matrix` is not defined under that name in this
# file (the generator above is mangled to `_A`), so this line raises NameError
# at import time; also both assignments target `__snake_case`, so the second
# overwrites the first, and `grid` inside the tuple is itself undefined.
__snake_case = generate_large_matrix()
# Test grids: every row and every column sorted in non-increasing order.
__snake_case = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def _A ( _lowercase ) -> None:
"""simple docstring"""
assert all(row == sorted(_lowercase , reverse=_lowercase ) for row in grid )
assert all(list(_lowercase ) == sorted(_lowercase , reverse=_lowercase ) for col in zip(*_lowercase ) )
def _A ( _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = 0
__UpperCamelCase = len(_lowercase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__UpperCamelCase = (left + right) // 2
__UpperCamelCase = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__UpperCamelCase = mid + 1
else:
__UpperCamelCase = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(_lowercase )
def _A(_lowercase) -> int:
    """Count negative numbers in a grid whose rows and columns are sorted in
    non-increasing order, using a shrinking binary-search bound per row.

    Because columns are also sorted, the first negative index can only move
    left (or stay) from one row to the next, so each row is searched only up
    to the previous row's bound.
    """
    # Restored from a mangled body in which `total`/`bound` were both assigned
    # to `__UpperCamelCase` while later lines read the intended names.
    total = 0
    bound = len(_lowercase[0])
    for i in range(len(_lowercase)):
        bound = find_negative_index(_lowercase[i][:bound])
        total += bound
    return (len(_lowercase) * len(_lowercase[0])) - total
def _A ( _lowercase ) -> int:
"""simple docstring"""
return len([number for row in grid for number in row if number < 0] )
def _A ( _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = 0
for row in grid:
for i, number in enumerate(_lowercase ):
if number < 0:
total += len(_lowercase ) - i
break
return total
def _A ( ) -> None:
    """Time the three counting strategies on the module-level `grid`.

    NOTE(review): the setup string imports `count_negatives_*` and `grid` from
    __main__, but in this mangled file those names do not exist (the functions
    are all mangled to `_A` and the grid to `__snake_case`), so `timeit` will
    fail — restore names before benchmarking.
    """
    from timeit import timeit

    print('Running benchmarks' )
    # Setup string executed once per timed function to bring names into scope.
    __UpperCamelCase = (
        'from __main__ import count_negatives_binary_search, '
        'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        __UpperCamelCase = timeit(f'''{func}(grid=grid)''' , setup=_lowercase , number=5_00 )
        print(f'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
    # Run the module's doctests first, then the timing benchmarks.
    import doctest

    doctest.testmod()
    benchmark()
| 1
|
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _UpperCamelCase(*args, take_from=None, standard_warn=True, stacklevel=2):
    """Emit deprecation warnings for arguments/attributes and pop their values.

    Each positional argument is a ``(attribute, version_name, message)`` tuple.
    If ``take_from`` is a dict (typically leftover kwargs), the deprecated
    value is popped from it and returned; if it is an object, the attribute is
    read from it; if it is None, only a warning is emitted.

    Returns:
        Nothing if no values were collected, the single value if exactly one
        was, otherwise a tuple of values.

    Raises:
        ValueError: if the current library version is already past
            ``version_name`` (the deprecation tuple should have been removed).
        TypeError: if ``take_from`` is a dict with unconsumed keys.
    """
    # Restored from a mangled signature that repeated one parameter name four
    # times (a SyntaxError) and compared the wrong variables in version.parse.
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)
    for attribute, version_name, message in args:
        # A deprecation whose removal version has passed is a programming error.
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )
        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."
        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)
    # Any key left in a dict `take_from` was not a recognized deprecated kwarg.
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")
    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
| 24
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure for the MobileViT model family: names are only
# imported when first accessed, and optional backends (vision / torch / tf)
# contribute their symbols only when available.
# NOTE(review): restored — in the mangled source every assignment targeted
# `lowercase`, so `_import_structure` (read by _LazyModule below) was never
# built and the per-backend lists overwrote one another.
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 718
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowercase : Dict = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class A ( unittest.TestCase ):
    """Pipeline tests for zero-shot text classification.

    NOTE(review): this class is machine-mangled. Several test methods declare
    duplicated parameter names (a SyntaxError), and pipeline results are
    assigned to `A : ...` while the following assertions read other names
    (`outputs`, `zero_shot_classifier`, ...). Restore distinct names before
    running.
    """

    # Model mappings under test, with the layout model configs filtered out.
    __magic_name__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    __magic_name__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        __magic_name__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        __magic_name__ = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
        """Build a pipeline instance plus sample inputs for the common test harness."""
        A : Any = ZeroShotClassificationPipeline(
            model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , candidate_labels=['''polics''', '''health'''] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
        """Exercise the classifier with every supported candidate_labels shape
        and verify the output schema; then check the error paths."""
        A : Optional[int] = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
        self.assertEqual(SCREAMING_SNAKE_CASE , {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE )]} )
        # No kwarg
        A : Dict = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
        self.assertEqual(SCREAMING_SNAKE_CASE , {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE )]} )
        A : str = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
        self.assertEqual(SCREAMING_SNAKE_CASE , {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE )]} )
        A : str = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
        self.assertEqual(
            SCREAMING_SNAKE_CASE , {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )]} )
        # Scores over the candidate labels should sum to one (softmax output).
        self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
        A : Optional[int] = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
        self.assertEqual(
            SCREAMING_SNAKE_CASE , {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
        A : Any = classifier(
            '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
        self.assertEqual(SCREAMING_SNAKE_CASE , {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE )]} )
        # https://github.com/huggingface/transformers/issues/13846
        A : List[str] = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
        self.assertEqual(
            SCREAMING_SNAKE_CASE , [
                {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )]}
                for i in range(1 )
            ] , )
        A : Dict = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
        self.assertEqual(
            SCREAMING_SNAKE_CASE , [
                {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )]}
                for i in range(2 )
            ] , )
        # Invalid inputs must raise: empty sequence, non-string sequence,
        # empty/None labels, a template without a {} placeholder, None template.
        with self.assertRaises(SCREAMING_SNAKE_CASE ):
            classifier('''''' , candidate_labels='''politics''' )
        with self.assertRaises(SCREAMING_SNAKE_CASE ):
            classifier(SCREAMING_SNAKE_CASE , candidate_labels='''politics''' )
        with self.assertRaises(SCREAMING_SNAKE_CASE ):
            classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
        with self.assertRaises(SCREAMING_SNAKE_CASE ):
            classifier('''Who are you voting for in 2020?''' , candidate_labels=SCREAMING_SNAKE_CASE )
        with self.assertRaises(SCREAMING_SNAKE_CASE ):
            classifier(
                '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
        with self.assertRaises(SCREAMING_SNAKE_CASE ):
            classifier(
                '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=SCREAMING_SNAKE_CASE , )
        self.run_entailment_id(SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Dict:
        """Check that `entailment_id` is resolved correctly from various
        label2id mappings (and restored afterwards)."""
        A : List[Any] = zero_shot_classifier.model.config
        A : int = config.labelaid
        A : Union[str, Any] = zero_shot_classifier.entailment_id
        # Generic labels -> no entailment label found -> -1.
        A : str = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        A : Optional[Any] = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        A : List[str] = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        A : List[str] = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        A : Any = original_labelaid
        self.assertEqual(SCREAMING_SNAKE_CASE , zero_shot_classifier.entailment_id )

    @require_torch
    def __lowerCAmelCase ( self ) -> Any:
        """Regression test for very long inputs truncating correctly."""
        A : Optional[int] = pipeline(
            '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            '''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''] )

    @require_torch
    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """Smoke test with a tiny PyTorch model (uniform scores expected)."""
        A : Tuple = pipeline(
            '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
        A : Optional[int] = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE ) , {
                '''sequence''': '''Who are you voting for in 2020?''',
                '''labels''': ['''science''', '''public health''', '''politics'''],
                '''scores''': [0.333, 0.333, 0.333],
            } , )

    @require_tf
    def __lowerCAmelCase ( self ) -> List[str]:
        """Smoke test with a tiny TensorFlow model (uniform scores expected)."""
        A : Optional[Any] = pipeline(
            '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
        A : Union[str, Any] = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE ) , {
                '''sequence''': '''Who are you voting for in 2020?''',
                '''labels''': ['''science''', '''public health''', '''politics'''],
                '''scores''': [0.333, 0.333, 0.333],
            } , )

    @slow
    @require_torch
    def __lowerCAmelCase ( self ) -> str:
        """End-to-end check with roberta-large-mnli (PyTorch), single- and
        multi-label."""
        A : str = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
        A : Tuple = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE ) , {
                '''sequence''': '''Who are you voting for in 2020?''',
                '''labels''': ['''politics''', '''public health''', '''science'''],
                '''scores''': [0.976, 0.015, 0.009],
            } , )
        A : List[str] = zero_shot_classifier(
            '''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
            ''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
            ''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
            ''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
            ''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
            ''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
            ''' English-to-German translation task, improving over the existing best results, including ensembles by'''
            ''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
            ''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
            ''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
            ''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=SCREAMING_SNAKE_CASE , )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE ) , {
                '''sequence''': (
                    '''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
                    ''' networks in an encoder-decoder configuration. The best performing models also connect the'''
                    ''' encoder and decoder through an attention mechanism. We propose a new simple network'''
                    ''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
                    ''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
                    ''' superior in quality while being more parallelizable and requiring significantly less time to'''
                    ''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
                    ''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
                    ''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
                    ''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
                    ''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
                    ''' other tasks by applying it successfully to English constituency parsing both with large and'''
                    ''' limited training data.'''
                ),
                '''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
                '''scores''': [0.817, 0.713, 0.018, 0.018],
            } , )

    @slow
    @require_tf
    def __lowerCAmelCase ( self ) -> List[str]:
        """End-to-end check with roberta-large-mnli (TensorFlow), single- and
        multi-label."""
        A : List[str] = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
        A : List[Any] = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE ) , {
                '''sequence''': '''Who are you voting for in 2020?''',
                '''labels''': ['''politics''', '''public health''', '''science'''],
                '''scores''': [0.976, 0.015, 0.009],
            } , )
        A : Tuple = zero_shot_classifier(
            '''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
            ''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
            ''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
            ''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
            ''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
            ''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
            ''' English-to-German translation task, improving over the existing best results, including ensembles by'''
            ''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
            ''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
            ''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
            ''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=SCREAMING_SNAKE_CASE , )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE ) , {
                '''sequence''': (
                    '''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
                    ''' networks in an encoder-decoder configuration. The best performing models also connect the'''
                    ''' encoder and decoder through an attention mechanism. We propose a new simple network'''
                    ''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
                    ''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
                    ''' superior in quality while being more parallelizable and requiring significantly less time to'''
                    ''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
                    ''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
                    ''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
                    ''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
                    ''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
                    ''' other tasks by applying it successfully to English constituency parsing both with large and'''
                    ''' limited training data.'''
                ),
                '''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
                '''scores''': [0.817, 0.713, 0.018, 0.018],
            } , )
| 343
| 0
|
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( _lowercase , unittest.TestCase ):
    # Pegasus tokenizer test suite, driven by the common TokenizerTesterMixin
    # (aliased `_lowercase` here). Exercises the slow (sentencepiece) and fast
    # (Rust) implementations against a small fixture model plus the pretrained
    # google/pegasus-large checkpoint.
    # NOTE(review): this file's identifiers were machine-renamed; many locals
    # below are assigned to throwaway names (`lowerCAmelCase__`) while later
    # lines read the original names (`tokenizer`, `vocab_keys`, `rust_tokenizer`,
    # `py_tokenizer`, `batch`, `targets`, ...) and arguments were replaced by the
    # undefined `__UpperCAmelCase`. Comments describe the intended behaviour.
    A__ = PegasusTokenizer          # slow tokenizer class under test
    A__ = PegasusTokenizerFast      # fast tokenizer class under test
    A__ = True                      # run the rust-tokenizer branch of the mixin
    A__ = True                      # sentencepiece-backed
    def __magic_name__( self ):
        # setUp: build a tokenizer from the sentencepiece fixture and save it so
        # the mixin can reload it from `self.tmpdirname`.
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCAmelCase__ : Union[str, Any] = PegasusTokenizer(__UpperCAmelCase )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def __magic_name__( self ):
        # Full-size pretrained tokenizer for integration-style checks.
        return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
    def __magic_name__( self , **__UpperCAmelCase ):
        # Reload the fixture tokenizer saved in setUp, forwarding kwargs.
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
    def __magic_name__( self , __UpperCAmelCase ):
        # Input/output pair used by the common round-trip tests.
        return ("This is a test", "This is a test")
    def __magic_name__( self ):
        # token <-> id round trip: "</s>" maps to id 1 and back.
        lowerCAmelCase__ : Optional[Any] = '''</s>'''
        lowerCAmelCase__ : Optional[int] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
    def __magic_name__( self ):
        # Vocabulary layout: pad first, eos second, last piece 'v', 1103 entries.
        lowerCAmelCase__ : Tuple = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<pad>''' )
        self.assertEqual(vocab_keys[1] , '''</s>''' )
        self.assertEqual(vocab_keys[-1] , '''v''' )
        self.assertEqual(len(__UpperCAmelCase ) , 1103 )
    def __magic_name__( self ):
        # vocab_size must match the fixture's 1103 pieces.
        self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
    def __magic_name__( self ):
        # Slow and fast tokenizers must produce identical ids for a string mixing
        # unknown tokens, mask tokens and special tokens.
        lowerCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        lowerCAmelCase__ : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname )
        lowerCAmelCase__ : int = (
            '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
            ''' </s> <pad> <pad> <pad>'''
        )
        lowerCAmelCase__ : Any = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
        lowerCAmelCase__ : Dict = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
    def __magic_name__( self ):
        # Pretrained tokenizer: mask tokens encode to fixed ids
        # (<mask_1> masks a whole sentence, <mask_2> a single word).
        lowerCAmelCase__ : Any = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        lowerCAmelCase__ : List[str] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
        lowerCAmelCase__ : Tuple = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        lowerCAmelCase__ : Tuple = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0]
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
    def __magic_name__( self ):
        # Sanity-check the pretrained tokenizer's fixed properties and a known
        # sentence encoding.
        lowerCAmelCase__ : Dict = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        lowerCAmelCase__ : str = '''To ensure a smooth flow of bank resolutions.'''
        lowerCAmelCase__ : int = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
        lowerCAmelCase__ : List[Any] = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0]
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
    @require_torch
    def __magic_name__( self ):
        # Batched encoding: inputs are truncated/padded to model_max_length (1024)
        # and targets to the requested max_length (5).
        lowerCAmelCase__ : Optional[int] = ['''This is going to be way too long.''' * 150, '''short example''']
        lowerCAmelCase__ : List[str] = ['''not super long but more than 5 tokens''', '''tiny''']
        lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
        lowerCAmelCase__ : Optional[int] = self._large_tokenizer(
            text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(__UpperCAmelCase ) == 2  # input_ids, attention_mask.
    @slow
    def __magic_name__( self ):
        # Full integration test against a pinned revision of
        # google/bigbird-pegasus-large-arxiv with a hard-coded expected encoding.
        # fmt: off
        lowerCAmelCase__ : Optional[int] = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCAmelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( _lowercase , unittest.TestCase ):
    # BigBird-Pegasus variant of the tokenizer test suite: same fixture model but
    # built with offset=0 and a '[MASK]' mask token, with model_max_length 4096.
    # NOTE(review): identifiers were machine-renamed; locals assigned to
    # `lowerCAmelCase__` are later read under their original names, and
    # `__UpperCAmelCase` arguments are undefined. Comments describe intent.
    A__ = PegasusTokenizer          # slow tokenizer class under test
    A__ = PegasusTokenizerFast      # fast tokenizer class under test
    A__ = True                      # run the rust-tokenizer branch of the mixin
    A__ = True                      # sentencepiece-backed
    def __magic_name__( self ):
        # setUp: build the BigBird-style tokenizer (no offset, '[MASK]' token,
        # no sentence mask token) from the fixture and save it to tmpdir.
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCAmelCase__ : List[Any] = PegasusTokenizer(__UpperCAmelCase , offset=0 , mask_token_sent=__UpperCAmelCase , mask_token='''[MASK]''' )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def __magic_name__( self ):
        # Full-size pretrained tokenizer for integration-style checks.
        return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
    def __magic_name__( self , **__UpperCAmelCase ):
        # Reload the fixture tokenizer saved in setUp, forwarding kwargs.
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
    def __magic_name__( self , __UpperCAmelCase ):
        # Input/output pair used by the common round-trip tests.
        return ("This is a test", "This is a test")
    def __magic_name__( self ):
        # Slow and fast tokenizers must agree on ids for a string containing
        # unknown tokens, '[MASK]' and special tokens.
        lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        lowerCAmelCase__ : int = self.tokenizer_class.from_pretrained(self.tmpdirname )
        lowerCAmelCase__ : str = (
            '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
            ''' <pad> <pad> <pad>'''
        )
        lowerCAmelCase__ : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
        lowerCAmelCase__ : int = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
    @require_torch
    def __magic_name__( self ):
        # Batched encoding: inputs padded/truncated to 4096, targets to 5.
        lowerCAmelCase__ : Optional[Any] = ['''This is going to be way too long.''' * 1000, '''short example''']
        lowerCAmelCase__ : int = ['''not super long but more than 5 tokens''', '''tiny''']
        lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
        lowerCAmelCase__ : Tuple = self._large_tokenizer(
            text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(__UpperCAmelCase ) == 2  # input_ids, attention_mask.
    def __magic_name__( self ):
        # Regression check against ids produced by the original TF implementation.
        lowerCAmelCase__ : List[str] = (
            '''This is an example string that is used to test the original TF implementation against the HF'''
            ''' implementation'''
        )
        lowerCAmelCase__ : Union[str, Any] = self._large_tokenizer(__UpperCAmelCase ).input_ids
        self.assertListEqual(
            __UpperCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 678
|
import collections
import os
import re
from pathlib import Path
# NOTE(review): the machine-renaming collapsed every module-level constant of the
# original check_inits.py (PATH_TO_TRANSFORMERS, _re_backend,
# _re_one_line_import_struct, _re_import_struct_key_value, _re_test_backend,
# _re_import_struct_add_one, _re_import_struct_add_many, _re_quote_object,
# _re_between_brackets, _re_import, _re_try, _re_else) into the single name
# `lowerCAmelCase_`, so only the last assignment survives and the functions
# below reference the original names. Restore distinct names to make this
# module runnable.
lowerCAmelCase_ = """src/transformers"""
# Matches is_xxx_available()
lowerCAmelCase_ = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowerCAmelCase_ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCAmelCase_ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowerCAmelCase_ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowerCAmelCase_ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCAmelCase_ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCAmelCase_ = re.compile(R"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCAmelCase_ = re.compile(R"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowerCAmelCase_ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowerCAmelCase_ = re.compile(R"""^\s*try:""")
# Catches a line with else:
lowerCAmelCase_ = re.compile(R"""^\s*else:""")
def __lowerCAmelCase ( UpperCamelCase ):
    """Extract a normalized backend key from an ``if not is_x_available()`` line.

    Returns ``"x_and_y"`` (backend names sorted, joined with ``_and_``) when the
    line is a backend guard, or ``None`` otherwise.
    """
    # NOTE(review): `_re_test_backend` / `_re_backend` are the compiled patterns
    # from the original module; the renamer collapsed their definitions above —
    # confirm they exist under these names.
    if _re_test_backend.search(UpperCamelCase) is None:
        return None
    # Original code dropped the findall result into a throwaway name and then
    # joined the raw input line; fixed to sort and join the extracted backends.
    backends = [b[0] for b in _re_backend.findall(UpperCamelCase)]
    backends.sort()
    return "_and_".join(backends)
def __lowerCAmelCase ( UpperCamelCase ):
    """Parse a transformers-style ``__init__.py``.

    Args:
        UpperCamelCase: path to the init file.

    Returns:
        A pair of dicts mapping backend name (``"none"`` for backend-free) to the
        list of object names declared (1) in ``_import_structure`` and (2) in the
        ``TYPE_CHECKING`` block, or ``None`` for a traditional init without
        ``_import_structure``.
    """
    # NOTE(review): fixes the machine-renaming that assigned every local to
    # `lowerCAmelCase__` while later lines read the original names (`lines`,
    # `objects`, `line`, `backend`, ...). `find_backend` and the `_re_*`
    # patterns are expected at module level.
    with open(UpperCamelCase, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure.
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                # Strip the surrounding quotes of each object name.
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure.
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else.
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list.
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects
    # without a specific backend.
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects.
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else.
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list.
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def __lowerCAmelCase ( import_dict_objects , type_hint_objects ):
    """Compare the ``_import_structure`` half of an init with its TYPE_CHECKING half.

    Args:
        import_dict_objects: backend -> object names from ``_import_structure``.
        type_hint_objects: backend -> object names from the TYPE_CHECKING block.

    Returns:
        A list of human-readable error strings (empty when the halves agree).
    """
    # NOTE(review): the renamer had declared both parameters as `UpperCamelCase`
    # (a SyntaxError) while the body read `import_dict_objects` /
    # `type_hint_objects`; the signature is restored to match the body.
    def find_duplicates(seq):
        # Names registered more than once in the same list.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""")
        # Compare as sets so ordering differences are not errors, then report the
        # exact objects missing from either side.
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else F"""{key} backend"""
            errors.append(F"""Differences for {name}:""")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F"""  {a} in TYPE_HINT but not in _import_structure.""")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F"""  {a} in _import_structure but not in TYPE_HINT.""")
    return errors
def __lowerCAmelCase ( path="src/transformers" ):
    """Walk *path* and check every ``__init__.py`` for consistency.

    Raises:
        ValueError: listing every init whose ``_import_structure`` and
            TYPE_CHECKING halves disagree.
    """
    # NOTE(review): restores the locals the renamer destroyed (`failures`,
    # `fname`, `objects`, `errors`) and parameterizes the previously-undefined
    # walk root (the original used the module constant PATH_TO_TRANSFORMERS ==
    # "src/transformers"). `parse_init` / `analyze_results` are the sibling
    # helpers of the original module — confirm their names here.
    failures = []
    for root, _, files in os.walk(path):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def __lowerCAmelCase ( path="src/transformers" ):
    """Collect the submodules of the package rooted at *path*.

    Returns:
        A list of dotted submodule names: every non-private package directory
        containing at least one ``.py`` file, plus every top-level module file.
    """
    # NOTE(review): restores the locals the renamer destroyed and parameterizes
    # the previously-undefined walk root (originally PATH_TO_TRANSFORMERS).
    submodules = []
    for current, directories, files in os.walk(path):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(current) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(current) / folder).relative_to(path))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(current) / fname).relative_to(path))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only keep top-level module files; deeper files belong to the
            # packages already collected above.
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
# Submodules intentionally absent from the main init's `_import_structure`.
# NOTE(review): the original constant name was IGNORE_SUBMODULES (referenced by
# the submodule check below); the renamer rebound it to `lowerCAmelCase_`.
lowerCAmelCase_ = [
    """convert_pytorch_checkpoint_to_tf2""",
    """modeling_flax_pytorch_utils""",
    """models.esm.openfold_utils""",
]
def __lowerCAmelCase ( path="src/transformers" ):
    """Check that every submodule is registered in the main ``__init__.py``.

    Raises:
        ValueError: listing each submodule missing from ``_import_structure``.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    # NOTE(review): restores the locals the renamer destroyed and parameterizes
    # the previously-undefined package root (originally PATH_TO_TRANSFORMERS).
    transformers = direct_transformers_import(path)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(path, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(F"""- {module}""" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            F"""{list_of_modules}\n"""
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
    # Entry point of the original check_inits.py: both checks raise ValueError on
    # inconsistency. NOTE(review): the renamer left the functions above named
    # `__lowerCAmelCase`, so these call names are unresolved in this file as-is.
    check_all_inits()
    check_submodules()
| 678
| 1
|
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowercase__ = 16  # original: MAX_GPU_BATCH_SIZE
lowercase__ = 32  # original: EVAL_BATCH_SIZE — NOTE(review): the renamer gave both constants the same name, so only this binding survives
def __UpperCamelCase ( accelerator , batch_size = 16 ):
    """Build the GLUE/MRPC train and eval dataloaders.

    Args:
        accelerator: the ``Accelerator`` driving the run (used for process
            coordination and padding decisions).
        batch_size: per-device training batch size.

    Returns:
        ``(train_dataloader, eval_dataloader)``.
    """
    # NOTE(review): the renamer had declared both parameters as `__lowerCamelCase`
    # (a SyntaxError) and destroyed every local; names are restored from the
    # upstream accelerate example this script mirrors.
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    # 32 is EVAL_BATCH_SIZE in the upstream script — TODO confirm against the
    # module constants above, which were collapsed by the renaming.
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=32)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    # NOTE(review): in the upstream script this rebinds `get_dataloaders` to the
    # mocked version; the renamer redirected the assignment to `lowercase__`.
    lowercase__ = mocked_dataloaders  # noqa: F811
def __UpperCamelCase ( config , args ):
    """Train BERT on GLUE/MRPC with out-of-memory-safe batch-size search.

    Args:
        config: dict with ``lr``, ``num_epochs``, ``seed`` and ``batch_size``.
        args: parsed CLI namespace with ``cpu`` and ``mixed_precision``.
    """
    # NOTE(review): the renamer had declared both parameters as `__lowerCamelCase`
    # (a SyntaxError) and destroyed every local; names are restored from the
    # upstream accelerate "memory" example this script mirrors.
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the
        # optimizer creation, otherwise training will not work on TPU.
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )
        # Prepare everything: unpack in the same order the objects were given.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions, references=references, )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def __UpperCamelCase ( ):
    """Parse CLI flags and launch training with default MRPC hyper-parameters."""
    # NOTE(review): restores the locals the renamer replaced with the undefined
    # `__snake_case`. `training_function` is the function above (renamed to
    # `__UpperCamelCase` in this file) — confirm the call target.
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    # NOTE(review): `main` is the intended entry point; the renamer left the
    # function above named `__UpperCamelCase`, so this call is unresolved as-is.
    main()
| 705
|
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class __SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
    # Flax BigBird QA module extended with a 5-way category classifier on the
    # pooled output (original name: FlaxBigBirdForNaturalQuestionsModule, base:
    # FlaxBigBirdForQuestionAnsweringModule aliased `lowerCamelCase__`).
    # NOTE(review): machine-renaming broke the field names and locals below
    # (`outputs`/`cls_out` are read but assigned to `_a`; `self.cls` presumably
    # receives the Dense layer created in setup).
    UpperCAmelCase = 42            # config field placeholder (originally: config: BigBirdConfig)
    UpperCAmelCase = jnp.floataa   # computation dtype
    UpperCAmelCase = True          # originally: add_pooling_layer — TODO confirm
    def a_ ( self ) -> int:
        # setup: add a Dense(5) head for answer-category classification.
        super().setup()
        _a = nn.Dense(5 , dtype=self.dtype )
    def __call__( self , *__UpperCamelCase , **__UpperCamelCase ) -> Optional[Any]:
        # Forward the base QA module, then classify the pooled output
        # (outputs[2]); returns (start_logits, end_logits, category_logits).
        _a = super().__call__(*__UpperCamelCase , **__UpperCamelCase )
        _a = self.cls(outputs[2] )
        return outputs[:2] + (cls_out,)
class __SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
    # Pretrained-model wrapper (original: FlaxBigBirdForNaturalQuestions) that
    # swaps in the module defined above; base class `lowerCamelCase__` is
    # FlaxBigBirdForQuestionAnswering.
    UpperCAmelCase = FlaxBigBirdForNaturalQuestionsModule
def __UpperCamelCase ( start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels ):
    """Mean cross-entropy over the start-, end- and category-heads, averaged.

    Each ``*_logits`` is compared against the matching integer ``*_labels``
    via one-hot cross entropy; the three mean losses are averaged.
    """
    # NOTE(review): the renamer had declared all six parameters (and the inner
    # helper's parameters) with duplicate names — a SyntaxError; the signature
    # is restored from the original calculate_loss_for_nq.
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        # One-hot encode the integer labels against the logit dimension.
        one_hot = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(one_hot * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooled_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class __SCREAMING_SNAKE_CASE :
    # Training configuration (original name: Args). NOTE(review): the renamer
    # collapsed all field names to `UpperCAmelCase`, so only the last binding
    # survives; the method below reads `self.base_dir`, `self.save_dir` and
    # `self.batch_size_per_device`, which no longer exist under those names.
    UpperCAmelCase = "google/bigbird-roberta-base"   # model id
    UpperCAmelCase = 3_000
    UpperCAmelCase = 10_500
    UpperCAmelCase = 128
    UpperCAmelCase = 3
    UpperCAmelCase = 1
    UpperCAmelCase = 5
    # tx_args
    UpperCAmelCase = 3E-5      # lr
    UpperCAmelCase = 0.0       # init_lr
    UpperCAmelCase = 20_000    # warmup_steps
    UpperCAmelCase = 0.00_95   # weight_decay
    UpperCAmelCase = "bigbird-roberta-natural-questions"   # save_dir — TODO confirm
    UpperCAmelCase = "training-expt"                        # base_dir — TODO confirm
    UpperCAmelCase = "data/nq-training.jsonl"               # train data path
    UpperCAmelCase = "data/nq-validation.jsonl"             # validation data path
    def a_ ( self ) -> Tuple:
        # Post-init: create the experiment directory and compute the global
        # batch size across all JAX devices (originally __post_init__ — confirm).
        os.makedirs(self.base_dir , exist_ok=__UpperCamelCase )
        _a = os.path.join(self.base_dir , self.save_dir )
        _a = self.batch_size_per_device * jax.device_count()
@dataclass
class __SCREAMING_SNAKE_CASE :
    # Data collator (original name: DataCollator): pads every feature to a fixed
    # max_length and converts the batch to int32 JAX arrays.
    # NOTE(review): the renamer collapsed both field names to `UpperCAmelCase`
    # and broke the locals (`batch`, `input_ids`, `attention_mask` are read but
    # assigned to `_a`); comments describe the intended behaviour.
    UpperCAmelCase = 42            # pad token id placeholder (originally: pad_id)
    UpperCAmelCase = 4_096  # no dynamic padding on TPUs
    def __call__( self , __UpperCamelCase ) -> Dict:
        # Collate raw features then shard across devices (tree_map was originally
        # `shard` — confirm).
        _a = self.collate_fn(__UpperCamelCase )
        _a = jax.tree_util.tree_map(__UpperCamelCase , __UpperCamelCase )
        return batch
    def a_ ( self , __UpperCamelCase ) -> Tuple:
        # Pad input_ids/attention_mask and package all labels as int32 arrays.
        _a , _a = self.fetch_inputs(features["input_ids"] )
        _a = {
            "input_ids": jnp.array(__UpperCamelCase , dtype=jnp.intaa ),
            "attention_mask": jnp.array(__UpperCamelCase , dtype=jnp.intaa ),
            "start_labels": jnp.array(features["start_token"] , dtype=jnp.intaa ),
            "end_labels": jnp.array(features["end_token"] , dtype=jnp.intaa ),
            "pooled_labels": jnp.array(features["category"] , dtype=jnp.intaa ),
        }
        return batch
    def a_ ( self , __UpperCamelCase ) -> List[str]:
        # Pad each sequence in the batch; returns (input_ids, attention_masks).
        _a = [self._fetch_inputs(__UpperCamelCase ) for ids in input_ids]
        return zip(*__UpperCamelCase )
    def a_ ( self , __UpperCamelCase ) -> List[str]:
        # Right-pad one sequence with pad_id up to max_length; mask is 1 for
        # real tokens and 0 for padding.
        _a = [1 for _ in range(len(__UpperCamelCase ) )]
        while len(__UpperCamelCase ) < self.max_length:
            input_ids.append(self.pad_id )
            attention_mask.append(0 )
        return input_ids, attention_mask
def __UpperCamelCase ( dataset , batch_size , seed=None ):
    """Yield ``batch_size``-sized dict batches from *dataset*, optionally shuffled.

    The trailing partial batch is dropped.
    """
    # NOTE(review): the renamer had declared all three parameters with the same
    # name (a SyntaxError); signature restored from the original
    # get_batched_dataset.
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap , axis_name="batch" )
def __UpperCamelCase ( state , drp_rng , **model_inputs ):
    """One pmapped training step.

    Pops the three label arrays out of ``model_inputs``, computes the combined
    QA loss with ``state.loss_fn``, averages loss and gradients across devices,
    and applies the gradient update.

    Returns:
        ``(new_state, metrics, new_drp_rng)``.
    """
    # NOTE(review): the renamer had declared duplicate parameter names (a
    # SyntaxError) and destroyed the locals; restored from the original
    # train_step of this script.
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels, )

    # Split the dropout rng so the next step gets fresh randomness.
    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="batch" )
def __UpperCamelCase ( __lowerCamelCase : Any , **__lowerCamelCase : Tuple ) -> List[Any]:
    '''simple docstring'''
    # One pmapped evaluation step: forward pass with the current params (no dropout
    # update), loss averaged across devices.  NOTE(review): ``state``/``model_inputs``/
    # ``outputs``/``loss`` are read but never bound here — mangled names; verify
    # against the upstream script.
    _a = model_inputs.pop("start_labels" )
    _a = model_inputs.pop("end_labels" )
    _a = model_inputs.pop("pooled_labels" )
    _a = state.apply_fn(**__lowerCamelCase , params=state.params , train=__lowerCamelCase )
    _a , _a , _a = outputs
    _a = state.loss_fn(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
    _a = jax.lax.pmean({"loss": loss} , axis_name="batch" )
    return metrics
class __SCREAMING_SNAKE_CASE ( train_state.TrainState ):
    # Extends flax's TrainState with an extra field (the loss function) carried as a
    # static, non-pytree leaf so it is not traced/replicated.
    UpperCAmelCase = struct.field(pytree_node=lowerCamelCase__ )
@dataclass
class __SCREAMING_SNAKE_CASE :
    """Minimal Flax trainer: builds/replicates the TrainState, runs the train loop,
    evaluates, and checkpoints.

    NOTE(review): the field names and the ``42`` placeholders, plus the pervasive
    ``_a`` re-assignments in the methods (``state``/``args``/``metrics`` etc. are read
    unbound), are obfuscation artefacts — verify names against the upstream script.
    """

    UpperCAmelCase = 42
    UpperCAmelCase = 42
    UpperCAmelCase = 42
    UpperCAmelCase = 42
    UpperCAmelCase = 42
    UpperCAmelCase = 42
    UpperCAmelCase = None

    def a_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None ) -> Union[str, Any]:
        # Create the TrainState (optionally restoring from ``ckpt_dir``) and replicate
        # it across local devices.
        _a = model.params
        _a = TrainState.create(
            apply_fn=model.__call__ , params=__UpperCamelCase , tx=__UpperCamelCase , loss_fn=__UpperCamelCase , )
        if ckpt_dir is not None:
            # Rebuild the optimizer with the restored hyper-parameters so its state
            # matches the checkpointed opt_state.
            _a , _a , _a , _a , _a = restore_checkpoint(__UpperCamelCase , __UpperCamelCase )
            _a = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            _a , _a = build_tx(**__UpperCamelCase )
            _a = train_state.TrainState(
                step=__UpperCamelCase , apply_fn=model.__call__ , params=__UpperCamelCase , tx=__UpperCamelCase , opt_state=__UpperCamelCase , )
            _a = args
            _a = data_collator
            _a = lr
            _a = params
        _a = jax_utils.replicate(__UpperCamelCase )
        return state

    def a_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> str:
        # Training loop over epochs/batches; logs every ``logging_steps`` batches and
        # saves a checkpoint every ``save_steps`` batches.
        _a = self.args
        _a = len(__UpperCamelCase ) // args.batch_size
        _a = jax.random.PRNGKey(0 )
        # One dropout RNG per device for pmapped steps.
        _a = jax.random.split(__UpperCamelCase , jax.device_count() )
        for epoch in range(args.max_epochs ):
            _a = jnp.array(0 , dtype=jnp.floataa )
            _a = get_batched_dataset(__UpperCamelCase , args.batch_size , seed=__UpperCamelCase )
            _a = 0
            for batch in tqdm(__UpperCamelCase , total=__UpperCamelCase , desc=f"Running EPOCH-{epoch}" ):
                _a = self.data_collator(__UpperCamelCase )
                _a , _a , _a = self.train_step_fn(__UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )
                running_loss += jax_utils.unreplicate(metrics["loss"] )
                i += 1
                if i % args.logging_steps == 0:
                    _a = jax_utils.unreplicate(state.step )
                    _a = running_loss.item() / i
                    _a = self.scheduler_fn(state_step - 1 )
                    _a = self.evaluate(__UpperCamelCase , __UpperCamelCase )
                    _a = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(__UpperCamelCase ) )
                    self.logger.log(__UpperCamelCase , commit=__UpperCamelCase )
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}" , state=__UpperCamelCase )

    def a_ ( self , __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
        # Average the per-batch loss over the whole evaluation split.
        _a = get_batched_dataset(__UpperCamelCase , self.args.batch_size )
        _a = len(__UpperCamelCase ) // self.args.batch_size
        _a = jnp.array(0 , dtype=jnp.floataa )
        _a = 0
        for batch in tqdm(__UpperCamelCase , total=__UpperCamelCase , desc="Evaluating ... " ):
            _a = self.data_collator(__UpperCamelCase )
            _a = self.val_step_fn(__UpperCamelCase , **__UpperCamelCase )
            running_loss += jax_utils.unreplicate(metrics["loss"] )
            i += 1
        return running_loss / i

    def a_ ( self , __UpperCamelCase , __UpperCamelCase ) -> Dict:
        # Persist model weights, optimizer state, args, data collator and step counter
        # to ``save_dir``.
        _a = jax_utils.unreplicate(__UpperCamelCase )
        print(f"SAVING CHECKPOINT IN {save_dir}" , end=" ... " )
        self.model_save_fn(__UpperCamelCase , params=state.params )
        with open(os.path.join(__UpperCamelCase , "opt_state.msgpack" ) , "wb" ) as f:
            f.write(to_bytes(state.opt_state ) )
        joblib.dump(self.args , os.path.join(__UpperCamelCase , "args.joblib" ) )
        joblib.dump(self.data_collator , os.path.join(__UpperCamelCase , "data_collator.joblib" ) )
        with open(os.path.join(__UpperCamelCase , "training_state.json" ) , "w" ) as f:
            json.dump({"step": state.step.item()} , __UpperCamelCase )
        print("DONE" )
def __UpperCamelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict ) -> Optional[int]:
    '''simple docstring'''
    # Restore (params, opt_state, step, args, data_collator) from a checkpoint
    # directory.  NOTE(review): the two parameters share one name (SyntaxError) and
    # the loads are bound to ``_a`` while ``state``/``training_state``/``params`` etc.
    # are read unbound — mangled names; verify against the upstream script.
    print(F"RESTORING CHECKPOINT FROM {save_dir}" , end=" ... " )
    with open(os.path.join(__lowerCamelCase , "flax_model.msgpack" ) , "rb" ) as f:
        _a = from_bytes(state.params , f.read() )
    with open(os.path.join(__lowerCamelCase , "opt_state.msgpack" ) , "rb" ) as f:
        _a = from_bytes(state.opt_state , f.read() )
    _a = joblib.load(os.path.join(__lowerCamelCase , "args.joblib" ) )
    _a = joblib.load(os.path.join(__lowerCamelCase , "data_collator.joblib" ) )
    with open(os.path.join(__lowerCamelCase , "training_state.json" ) , "r" ) as f:
        _a = json.load(__lowerCamelCase )
    _a = training_state["step"]
    print("DONE" )
    return params, opt_state, step, args, data_collator
def __UpperCamelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] ) -> List[str]:
    '''simple docstring'''
    # Linear warmup followed by linear decay to 1e-7 over the remaining training steps.
    # NOTE(review): duplicate parameter names (SyntaxError) and the schedules are bound
    # to ``_a`` while ``warmup_fn``/``decay_fn``/``lr`` are read unbound — mangled
    # names; verify against the upstream script.
    _a = num_train_steps - warmup_steps
    _a = optax.linear_schedule(init_value=__lowerCamelCase , end_value=__lowerCamelCase , transition_steps=__lowerCamelCase )
    _a = optax.linear_schedule(init_value=__lowerCamelCase , end_value=1E-7 , transition_steps=__lowerCamelCase )
    _a = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
    return lr
def __UpperCamelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple ) -> Any:
    '''simple docstring'''
    # Build an AdamW optimizer with the warmup+decay schedule and a weight-decay mask
    # that excludes biases and LayerNorm scale parameters.
    # NOTE(review): duplicate parameter names (SyntaxError) and mangled locals
    # (``params``/``tx``/``lr`` read unbound); verify against the upstream script.
    def weight_decay_mask(__lowerCamelCase : Any ):
        # True for every leaf that should receive weight decay.
        _a = traverse_util.flatten_dict(__lowerCamelCase )
        _a = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(__lowerCamelCase )
    _a = scheduler_fn(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
    _a = optax.adamw(learning_rate=__lowerCamelCase , weight_decay=__lowerCamelCase , mask=__lowerCamelCase )
    return tx, lr
| 276
| 0
|
"""simple docstring"""
import inspect
import unittest
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Sanity checks for the diffusers dependency table.

    Fix over the obfuscated original: the class-member list was bound to one mangled
    name (``lowerCAmelCase__``) while ``all_classes``/``lowercase_`` were read unbound
    (NameError at runtime); a single consistent name is now used throughout.

    NOTE(review): both methods share the name ``__lowerCAmelCase`` (the second shadows
    the first) — preserved here because renaming would change the class interface.
    """

    def __lowerCAmelCase ( self : Optional[int] ):
        # diffusers must be importable in the test environment.
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def __lowerCAmelCase ( self : int ):
        # Every backend referenced by a dummy object must appear in the deps table.
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers , inspect.isclass )
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # Map backend identifiers to their PyPI package names.
                    if backend == "k_diffusion":
                        backend = '''k-diffusion'''
                    elif backend == "invisible_watermark":
                        backend = '''invisible-watermark'''
                    assert backend in deps, F'{backend} is not in the deps table!'
| 450
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE ( a_ ):
    """Unconditional image generation with the variance-exploding SDE sampler.

    Fixes over the obfuscated original: ``__init__`` and ``__call__`` each declared
    several parameters with the same name (a SyntaxError in Python), and locals such
    as ``shape``, ``sample``, ``model`` and ``output`` were read but never bound
    (NameError).  Distinct, conventional names are restored; the call semantics
    (predictor/corrector sampling loop) are unchanged.
    """

    lowercase__ = 42
    lowercase__ = 42

    def __init__( self : int , unet : UNetaDModel , scheduler : ScoreSdeVeScheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self : List[str] , batch_size : int = 1 , num_inference_steps : int = 2_0_0_0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , **kwargs : Dict , ):
        # Sample ``batch_size`` images by integrating the reverse SDE.
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # Start from pure noise scaled to the scheduler's initial sigma.
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        # The noise-free mean of the last step is used as the final image.
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
| 450
| 1
|
'''simple docstring'''
def UpperCamelCase( input_a , input_b ):
    """NAND gate: return 1 unless both inputs are 1, else 0.

    Fix: the original declared both parameters with the same name
    (``UpperCAmelCase_`` twice), which is a SyntaxError in Python; the
    parameters are now distinct.
    """
    # The gate output is 0 only when neither input contributes a 0.
    return int((input_a, input_b).count(0 ) != 0 )
def UpperCamelCase( ):
    """Self-test: check the NAND gate over all four input combinations."""
    # NOTE(review): this redefines ``UpperCamelCase`` (shadowing the gate function)
    # and calls ``nand_gate``, which is never defined in this file — the obfuscation
    # renamed the gate but not its call sites; would raise NameError if executed.
    assert nand_gate(0 , 0 ) == 1
    assert nand_gate(0 , 1 ) == 1
    assert nand_gate(1 , 0 ) == 1
    assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
    # Print the NAND truth table.
    # NOTE(review): ``nand_gate`` is not defined anywhere in this file (the gate was
    # renamed by obfuscation) — these calls would raise NameError.
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
| 695
|
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ = 10_00 ):
    """Project Euler 16: return the sum of the decimal digits of ``2 ** power``.

    Args:
        UpperCAmelCase_: the exponent (default 1000, the original problem input).

    Fix over the obfuscated original: both working values were assigned to one
    mangled name while ``n`` and ``r`` were read unbound (NameError); the two
    locals are now bound consistently.

    >>> UpperCamelCase(15)
    26
    """
    n = 2 ** UpperCAmelCase_  # the power of two whose digits we sum
    r = 0                     # running digit sum
    while n:
        # Peel off the last decimal digit and shift the number right by one digit.
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
    # Read a power from stdin and print the digit sum of 2**power.
    # NOTE(review): ``solution`` is undefined here — the function above was renamed
    # ``UpperCamelCase`` by obfuscation; this line would raise NameError.
    print(solution(int(str(input()).strip())))
| 695
| 1
|
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __lowercase ( unittest.TestCase ):
    """Pipeline smoke tests for video classification."""

    # NOTE(review): throughout this class results are bound to ``__a`` while distinct
    # names (``example_video_filepath``, ``video_classifier``, ``examples``,
    # ``video_file_path``) are read unbound, and some methods declare duplicate
    # parameter names — obfuscation artefacts; verify against the upstream HF test.
    __lowerCAmelCase = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
        # Build a pipeline plus two example inputs (local file and remote URL).
        __a : Union[str, Any] = hf_hub_download(
            repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
        __a : Union[str, Any] = VideoClassificationPipeline(model=_UpperCAmelCase , image_processor=_UpperCAmelCase , top_k=2 )
        __a : List[Any] = [
            example_video_filepath,
            '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
        ]
        return video_classifier, examples

    def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
        # Every example must yield ``top_k`` {score, label} dicts.
        for example in examples:
            __a : Dict = video_classifier(_UpperCAmelCase )
            self.assertEqual(
                _UpperCAmelCase , [
                    {'''score''': ANY(_UpperCAmelCase ), '''label''': ANY(_UpperCAmelCase )},
                    {'''score''': ANY(_UpperCAmelCase ), '''label''': ANY(_UpperCAmelCase )},
                ] , )

    @require_torch
    def _lowerCamelCase ( self ):
        # End-to-end check with a tiny random VideoMAE model and pinned scores.
        __a : str = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
        __a : List[str] = VideoMAEFeatureExtractor(
            size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} )
        __a : Tuple = pipeline(
            '''video-classification''' , model=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , frame_sampling_rate=4 )
        __a : List[str] = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
        __a : List[Any] = video_classifier(_UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(_UpperCAmelCase , decimals=4 ) , [{'''score''': 0.5_1_9_9, '''label''': '''LABEL_0'''}, {'''score''': 0.4_8_0_1, '''label''': '''LABEL_1'''}] , )
        __a : List[Any] = video_classifier(
            [
                video_file_path,
                video_file_path,
            ] , top_k=2 , )
        self.assertEqual(
            nested_simplify(_UpperCAmelCase , decimals=4 ) , [
                [{'''score''': 0.5_1_9_9, '''label''': '''LABEL_0'''}, {'''score''': 0.4_8_0_1, '''label''': '''LABEL_1'''}],
                [{'''score''': 0.5_1_9_9, '''label''': '''LABEL_0'''}, {'''score''': 0.4_8_0_1, '''label''': '''LABEL_1'''}],
            ] , )

    @require_tf
    def _lowerCamelCase ( self ):
        # TensorFlow backend is not covered by this test.
        pass
| 52
|
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def __A ( masked_input :Union[str, Any] , model :Union[str, Any] , tokenizer :Optional[Any] , topk :Optional[int]=5) -> List[Any]:
    """Fill the single ``<mask>`` token in *masked_input* with the model's top-k predictions.

    Returns a list of ``(filled_sentence, probability, predicted_token)`` tuples.

    Fixes over the obfuscated original: all four parameters shared one name
    (a SyntaxError in Python) and every local was bound to ``__a`` while distinct
    names (``logits``, ``prob``, ``values`` ...) were read unbound (NameError).
    Names are restored from the fairseq RoBERTa reference implementation.
    """
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count('''<mask>''') == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input , add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values , indices = prob.topk(k=topk , dim=0)
    topk_predicted_token_bpe = ''' '''.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))])
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''')):
        # SentencePiece marks word starts with U+2581; map it back to a plain space.
        predicted_token = predicted_token_bpe.replace('''\u2581''' , ''' ''')
        if " {0}".format(predicted_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(''' {0}'''.format(predicted_token) , predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token , predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
    return topk_filled_outputs
# Demo: fill the masked token in a French sentence with camembert-base.
# NOTE(review): every result is bound to ``A`` while ``model``/``tokenizer``/
# ``masked_input``/``fill_mask`` are read unbound — mangled names; verify upstream.
A = CamembertTokenizer.from_pretrained('''camembert-base''')
A = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
A = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 52
| 1
|
import inspect
import unittest
class A_ ( unittest.TestCase ):
    """Sanity checks for the diffusers dependency table.

    Fix over the obfuscated original: the member list / backend name were bound to
    ``lowercase`` while ``all_classes``/``snake_case`` were read unbound (NameError);
    a single consistent name is now used throughout.

    NOTE(review): both methods share the name ``SCREAMING_SNAKE_CASE__`` (the second
    shadows the first) — preserved to keep the class interface unchanged.
    """

    def SCREAMING_SNAKE_CASE__ ( self ):
        # diffusers must be importable in the test environment.
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Every backend referenced by a dummy object must appear in the deps table.
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers , inspect.isclass )
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # Map backend identifiers to their PyPI package names.
                    if backend == "k_diffusion":
                        backend = 'k-diffusion'
                    elif backend == "invisible_watermark":
                        backend = 'invisible-watermark'
                    assert backend in deps, F'''{backend} is not in the deps table!'''
| 713
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger(__name__)
def UpperCAmelCase_ ( config , base_model=False ):
    """Build (timm key, HF key) rename pairs for converting a ViT checkpoint.

    Args:
        config: model config exposing ``num_hidden_layers``.
        base_model: when True, target a bare ``ViTModel`` (no ``vit.`` prefix,
            pooler instead of classification head).

    Fixes over the obfuscated original: both parameters shared one name (a
    SyntaxError in Python) and the list was bound to ``lowercase`` while
    ``rename_keys`` was read unbound (NameError).
    """
    rename_keys = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
        rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
        rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
        rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
        rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
        rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ('cls_token', 'vit.embeddings.cls_token'),
            ('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
            ('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
            ('pos_embed', 'vit.embeddings.position_embeddings'),
        ] )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ('norm.weight', 'layernorm.weight'),
                ('norm.bias', 'layernorm.bias'),
                ('pre_logits.fc.weight', 'pooler.dense.weight'),
                ('pre_logits.fc.bias', 'pooler.dense.bias'),
            ] )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ('norm.weight', 'vit.layernorm.weight'),
                ('norm.bias', 'vit.layernorm.bias'),
                ('head.weight', 'classifier.weight'),
                ('head.bias', 'classifier.bias'),
            ] )
    return rename_keys
def UpperCAmelCase_ ( state_dict , config , base_model=False ):
    """Split each timm fused qkv projection into separate HF query/key/value tensors.

    Mutates ``state_dict`` in place: pops ``blocks.{i}.attn.qkv.*`` and writes the
    three per-head projections under the HF attention key layout.

    Fixes over the obfuscated original: the three parameters shared one name (a
    SyntaxError in Python) and the split tensors were discarded into ``lowercase``
    instead of being written back into the state dict.
    """
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
    """Drop the timm classification-head tensors from a state dict (in place).

    Fixes over the obfuscated original: the key list was bound to ``lowercase``
    while ``ignore_keys`` was iterated unbound (NameError), and the state dict
    itself was passed as ``pop``'s default value.
    """
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        # ``None`` default makes the removal tolerant of already-missing keys.
        __SCREAMING_SNAKE_CASE.pop(k , None )
def UpperCAmelCase_ ( dct , old , new ):
    """Move ``dct[old]`` to ``dct[new]`` (in place).

    Fix: the original declared all three parameters with the same name, which is a
    SyntaxError in Python; distinct names are restored.
    """
    val = dct.pop(old )
    dct[new] = val
def UpperCAmelCase_ ( ):
    """Download and return the standard COCO cats image used to sanity-check conversions.

    Fix over the obfuscated original: the URL was bound to ``lowercase`` and an
    unbound name was then passed to ``requests.get`` (NameError).
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
    # Convert a timm ViT/DeiT checkpoint to the HF format, verify outputs on a test
    # image, and save model + image processor to the dump folder.
    # NOTE(review): the two parameters share one name (a SyntaxError in Python) and
    # every local is bound to ``lowercase`` while distinct names (``config``,
    # ``vit_name``, ``model``, ...) are read unbound — obfuscation artefacts; verify
    # against the upstream conversion script before relying on this code.
    lowercase = ViTConfig()
    lowercase = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        lowercase = True
        lowercase = int(vit_name[-12:-10] )
        lowercase = int(vit_name[-9:-6] )
    else:
        # fine-tuned checkpoints carry the ImageNet-1k label mapping
        lowercase = 1000
        lowercase = 'huggingface/label-files'
        lowercase = 'imagenet-1k-id2label.json'
        lowercase = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
        lowercase = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
        lowercase = idalabel
        lowercase = {v: k for k, v in idalabel.items()}
        lowercase = int(vit_name[-6:-4] )
        lowercase = int(vit_name[-3:] )
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith('tiny' ):
            lowercase = 192
            lowercase = 768
            lowercase = 12
            lowercase = 3
        elif vit_name[9:].startswith('small' ):
            lowercase = 384
            lowercase = 1536
            lowercase = 12
            lowercase = 6
        else:
            pass
    else:
        if vit_name[4:].startswith('small' ):
            lowercase = 768
            lowercase = 2304
            lowercase = 8
            lowercase = 8
        elif vit_name[4:].startswith('base' ):
            pass
        elif vit_name[4:].startswith('large' ):
            lowercase = 1024
            lowercase = 4096
            lowercase = 24
            lowercase = 16
        elif vit_name[4:].startswith('huge' ):
            lowercase = 1280
            lowercase = 5120
            lowercase = 32
            lowercase = 16
    # load original model from timm
    lowercase = timm.create_model(__SCREAMING_SNAKE_CASE , pretrained=__SCREAMING_SNAKE_CASE )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    lowercase = timm_model.state_dict()
    if base_model:
        remove_classification_head_(__SCREAMING_SNAKE_CASE )
    lowercase = create_rename_keys(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    for src, dest in rename_keys:
        rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        lowercase = ViTModel(__SCREAMING_SNAKE_CASE ).eval()
    else:
        lowercase = ViTForImageClassification(__SCREAMING_SNAKE_CASE ).eval()
    model.load_state_dict(__SCREAMING_SNAKE_CASE )
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        lowercase = DeiTImageProcessor(size=config.image_size )
    else:
        lowercase = ViTImageProcessor(size=config.image_size )
    lowercase = image_processor(images=prepare_img() , return_tensors='pt' )
    lowercase = encoding['pixel_values']
    lowercase = model(__SCREAMING_SNAKE_CASE )
    if base_model:
        # bare model: compare pooled features against timm
        lowercase = timm_model.forward_features(__SCREAMING_SNAKE_CASE )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(__SCREAMING_SNAKE_CASE , outputs.pooler_output , atol=1e-3 )
    else:
        # classification model: compare logits against timm
        lowercase = timm_model(__SCREAMING_SNAKE_CASE )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(__SCREAMING_SNAKE_CASE , outputs.logits , atol=1e-3 )
    Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
    print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(__SCREAMING_SNAKE_CASE )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    # CLI entry point: choose the timm model name and the output directory.
    # NOTE(review): results are bound to ``UpperCAmelCase`` while ``parser``/``args``
    # are read unbound, and ``convert_vit_checkpoint`` is undefined here (the
    # function above was renamed by obfuscation) — verify against upstream.
    UpperCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--vit_name''',
        default='''vit_base_patch16_224''',
        type=str,
        help='''Name of the ViT timm model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    UpperCAmelCase = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 565
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding for the MobileBERT family: framework-specific submodules
# are only imported on first attribute access (or eagerly for static type checkers).
# NOTE(review): in the upstream file each optional export list is inserted into the
# ``_import_structure`` dict; here every list is re-bound to ``a__`` and
# ``_import_structure`` (read at the bottom) is never defined — mangled names.
a__ : Union[str, Any] = {
    '''configuration_mobilebert''': [
        '''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''MobileBertConfig''',
        '''MobileBertOnnxConfig''',
    ],
    '''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
# Register the fast tokenizer only when the ``tokenizers`` backend is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a__ : List[str] = ['''MobileBertTokenizerFast''']
# PyTorch model exports, guarded on torch availability.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a__ : Optional[int] = [
        '''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MobileBertForMaskedLM''',
        '''MobileBertForMultipleChoice''',
        '''MobileBertForNextSentencePrediction''',
        '''MobileBertForPreTraining''',
        '''MobileBertForQuestionAnswering''',
        '''MobileBertForSequenceClassification''',
        '''MobileBertForTokenClassification''',
        '''MobileBertLayer''',
        '''MobileBertModel''',
        '''MobileBertPreTrainedModel''',
        '''load_tf_weights_in_mobilebert''',
    ]
# TensorFlow model exports, guarded on TF availability.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a__ : Dict = [
        '''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFMobileBertForMaskedLM''',
        '''TFMobileBertForMultipleChoice''',
        '''TFMobileBertForNextSentencePrediction''',
        '''TFMobileBertForPreTraining''',
        '''TFMobileBertForQuestionAnswering''',
        '''TFMobileBertForSequenceClassification''',
        '''TFMobileBertForTokenClassification''',
        '''TFMobileBertMainLayer''',
        '''TFMobileBertModel''',
        '''TFMobileBertPreTrainedModel''',
    ]
# Eager imports for type checkers only; at runtime the lazy module below is used.
if TYPE_CHECKING:
    from .configuration_mobilebert import (
        MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileBertConfig,
        MobileBertOnnxConfig,
    )
    from .tokenization_mobilebert import MobileBertTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mobilebert_fast import MobileBertTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilebert import (
            MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
            MobileBertLayer,
            MobileBertModel,
            MobileBertPreTrainedModel,
            load_tf_weights_in_mobilebert,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilebert import (
            TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileBertForMaskedLM,
            TFMobileBertForMultipleChoice,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertMainLayer,
            TFMobileBertModel,
            TFMobileBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    a__ : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__magic_name__ : Any = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__magic_name__ : Optional[Any] = 12_8022
__magic_name__ : Dict = 12_8028
@require_sentencepiece
class A__ ( __snake_case , unittest.TestCase ):
'''simple docstring'''
snake_case__ = MaMaaaTokenizer
snake_case__ = False
snake_case__ = False
snake_case__ = True
    def _SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Create a tiny vocab + sentencepiece fixture tokenizer in the temp dir."""
        # NOTE(review): locals are all bound to ``UpperCamelCase`` while ``save_dir``
        # is read unbound — mangled names; verify against the upstream test.
        super().setUp()
        UpperCamelCase = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
        UpperCamelCase = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
        UpperCamelCase = Path(self.tmpdirname )
        save_json(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES['vocab_file'] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES['spm_file'] )
        UpperCamelCase = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def _SCREAMING_SNAKE_CASE ( self : str , **_SCREAMING_SNAKE_CASE : List[str] ):
        """Instantiate a tokenizer from the fixture directory with optional kwargs."""
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] ):
        """Return the (input_text, expected_output_text) pair for round-trip tests."""
        return (
            "This is a test",
            "This is a test",
        )
    def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """``</s>`` must convert to id 0 and back."""
        UpperCamelCase = '</s>'
        UpperCamelCase = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Check ordering and size of the fixture vocabulary."""
        # NOTE(review): ``tokenizer``/``vocab_keys`` are read but bound to
        # ``UpperCamelCase`` — mangled names; verify against the upstream test.
        UpperCamelCase = self.get_tokenizer()
        UpperCamelCase = list(tokenizer.get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '</s>' )
        self.assertEqual(vocab_keys[1] , '<unk>' )
        self.assertEqual(vocab_keys[-1] , '<s>' )
        self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
    @unittest.skip('Skip this test while all models are still to be uploaded.' )
    def _SCREAMING_SNAKE_CASE ( self : str ):
        """Intentionally skipped upstream until checkpoints are published."""
        pass
    def _SCREAMING_SNAKE_CASE ( self : Any ):
        """Tokenize → ids → tokens → string round trip on a short sentence."""
        UpperCamelCase = self.get_tokenizer()
        UpperCamelCase = tokenizer.tokenize('This is a test' )
        self.assertListEqual(_SCREAMING_SNAKE_CASE , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [2, 3, 4, 5, 6] , )
        UpperCamelCase = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
        self.assertListEqual(_SCREAMING_SNAKE_CASE , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        UpperCamelCase = tokenizer.convert_tokens_to_string(_SCREAMING_SNAKE_CASE )
        self.assertEqual(_SCREAMING_SNAKE_CASE , 'This is a test' )
@slow
def _SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
UpperCamelCase = {'input_ids': [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name='facebook/m2m100_418M' , revision='c168bae485c864188cf9aa0e4108b0b6934dc91e' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
    '''simple docstring'''

    # Integration tests for the facebook/m2m100_418M tokenizer; the decorators
    # above skip the class unless torch / sentencepiece / tokenizers are
    # installed, and the tests download the real checkpoint.
    #
    # NOTE(review): an obfuscation pass collapsed every class attribute into
    # the single name `snake_case__`, every method into `_SCREAMING_SNAKE_CASE`,
    # and turned attribute assignments (e.g. `cls.tokenizer = ...`) into
    # throwaway local bindings (`UpperCamelCase = ...`).  As written the
    # attributes and methods shadow one another and `cls.checkpoint_name` /
    # `self.tokenizer` are never defined, so the class cannot run until the
    # original names are restored — confirm against the upstream
    # test_tokenization_m2m_100.py.
    snake_case__ = """facebook/m2m100_418M"""
    snake_case__ = [
        """In my opinion, there are two levels of response from the French government.""",
        """NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
    ]
    snake_case__ = [
        """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
        """L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
    ]
    # fmt: off
    snake_case__ = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]

    @classmethod
    def _SCREAMING_SNAKE_CASE ( cls : Any ):
        """simple docstring"""
        # Presumably setUpClass: load the tokenizer once for the whole class
        # (en -> fr) — the result was meant to be stored on `cls`.
        UpperCamelCase = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='en' , tgt_lang='fr' )
        UpperCamelCase = 1
        return cls

    def _SCREAMING_SNAKE_CASE ( self : str ):
        """simple docstring"""
        # Language-code -> id lookups for a few representative languages.
        self.assertEqual(self.tokenizer.get_lang_id('ar' ) , 12_8006 )
        self.assertEqual(self.tokenizer.get_lang_id('en' ) , 12_8022 )
        self.assertEqual(self.tokenizer.get_lang_id('ro' ) , 12_8076 )
        self.assertEqual(self.tokenizer.get_lang_id('mr' ) , 12_8063 )

    def _SCREAMING_SNAKE_CASE ( self : Tuple ):
        """simple docstring"""
        # Vocab sanity: size, <unk> id, and presence of the 'en' language token.
        UpperCamelCase = self.tokenizer.get_vocab()
        self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.tokenizer.vocab_size )
        self.assertEqual(vocab['<unk>'] , 3 )
        self.assertIn(self.tokenizer.get_lang_token('en' ) , _SCREAMING_SNAKE_CASE )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """simple docstring"""
        # Tokenizing the first source sentence must reproduce the expected ids.
        UpperCamelCase = 'en'
        UpperCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , _SCREAMING_SNAKE_CASE )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """simple docstring"""
        self.assertIn(_SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids )
        # fmt: off
        UpperCamelCase = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
        # fmt: on
        # Decoding with and without the leading language code should agree once
        # special tokens are skipped, and must not contain the EOS token.
        UpperCamelCase = self.tokenizer.decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
        UpperCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_SCREAMING_SNAKE_CASE )
        self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        self.assertNotIn(self.tokenizer.eos_token , _SCREAMING_SNAKE_CASE )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """simple docstring"""
        # Round-trip: saving and reloading must preserve the lang-token mapping.
        UpperCamelCase = tempfile.mkdtemp()
        UpperCamelCase = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
        UpperCamelCase = MaMaaaTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
        self.assertDictEqual(new_tok.lang_token_to_id , _SCREAMING_SNAKE_CASE )

    @require_torch
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """simple docstring"""
        # Seq2seq batch layout: language code first, EOS last, and the decoder
        # input ids shifted right from the labels.
        UpperCamelCase = 'en'
        UpperCamelCase = 'fr'
        UpperCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
        UpperCamelCase = shift_tokens_right(
            batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
        for k in batch:
            UpperCamelCase = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def _SCREAMING_SNAKE_CASE ( self : Any ):
        """simple docstring"""
        # Changing src_lang must update the prefix token; suffix stays EOS.
        UpperCamelCase = 'mr'
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        UpperCamelCase = 'zh'
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )

    @require_torch
    def _SCREAMING_SNAKE_CASE ( self : Tuple ):
        """simple docstring"""
        # Switching between target and input modes toggles which language id
        # is used as the prefix token.
        UpperCamelCase = 'mr'
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
        UpperCamelCase = 'zh'
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )

    @require_torch
    def _SCREAMING_SNAKE_CASE ( self : Any ):
        """simple docstring"""
        # _build_translation_inputs must prepend the src language id and set
        # forced_bos_token_id to the target language id (ar_AR).
        UpperCamelCase = self.tokenizer._build_translation_inputs('A test' , return_tensors='pt' , src_lang='en' , tgt_lang='ar' )
        self.assertEqual(
            nested_simplify(_SCREAMING_SNAKE_CASE ) , {
                # en_XX, A, test, EOS
                'input_ids': [[12_8022, 58, 4183, 2]],
                'attention_mask': [[1, 1, 1, 1]],
                # ar_AR
                'forced_bos_token_id': 12_8006,
            } , )
| 280
| 0
|
"""simple docstring"""
import cva
import numpy as np
class _lowercase:
    """Harris corner detector over a grayscale image.

    NOTE(review): the obfuscated original bound every assignment target to one
    junk name, so ``self.k`` / ``self.window_size`` and all locals in
    ``detect`` were never actually set; names restored from usage.
    """

    def __init__(self, k: float, window_size: int) -> None:
        """
        k: Harris sensitivity constant; only the customary values 0.04
           and 0.06 are accepted.
        window_size: side length of the square neighbourhood summed per pixel.

        Raises:
            ValueError: if ``k`` is not one of the accepted values.
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('''invalid k value''')

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> "tuple[cva.Mat, list[list[int]]]":
        """Run Harris corner detection on the image at ``img_path``.

        Returns an RGB copy of the image with detected corners painted red,
        plus the list of ``[x, y, response]`` corner entries.
        """
        img = cva.imread(img_path, 0)  # load as grayscale
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        # Fix: use the constructor-validated constant instead of a hard-coded
        # 0.04, which silently ignored self.k (e.g. when built with k=0.06).
        k = self.k
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value: threshold on the Harris response.
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = _lowercase(0.04, 3)
    color_img, corner_list = edge_detect.detect('''path_to_image''')
    cva.imwrite('''detect.png''', color_img)
| 296
|
"""simple docstring"""
def lowerCAmelCase(__UpperCamelCase: int = 1_0_0_0_0_0_0):
    """Project Euler 14: return the start value below ``__UpperCamelCase``
    that produces the longest Collatz chain.

    Chain lengths are memoised in ``counters`` so each number's tail is only
    walked once.  (The obfuscated original assigned every value to one junk
    name, leaving ``counter``/``number``/``counters`` undefined; names
    restored from usage.)
    """
    largest_number = 1
    pre_counter = 1  # length of the best chain found so far
    counters = {1: 1}  # start value -> chain length (the value 1 counts itself)
    for inputa in range(2, __UpperCamelCase):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                # Reached a start value whose chain length is already known.
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    print(lowerCAmelCase(int(input().strip())))
| 296
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# NOTE(review): all five module constants below were collapsed into the single
# name `__snake_case` by an obfuscation pass, leaving `logger`,
# `SPIECE_UNDERLINE`, `VOCAB_FILES_NAMES`, etc. undefined for the tokenizer
# class that follows.  Names restored to the conventional transformers ones.
logger = logging.get_logger(__name__)

# SentencePiece's whitespace marker character.
SPIECE_UNDERLINE = """▁"""

VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
        """xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
        """xlm-roberta-large-finetuned-conll02-dutch""": (
            """https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
        ),
        """xlm-roberta-large-finetuned-conll02-spanish""": (
            """https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
        ),
        """xlm-roberta-large-finetuned-conll03-english""": (
            """https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
        ),
        """xlm-roberta-large-finetuned-conll03-german""": (
            """https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
        ),
    }
}

# Maximum input lengths (positional-embedding sizes) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """xlm-roberta-base""": 512,
    """xlm-roberta-large""": 512,
    """xlm-roberta-large-finetuned-conll02-dutch""": 512,
    """xlm-roberta-large-finetuned-conll02-spanish""": 512,
    """xlm-roberta-large-finetuned-conll03-english""": 512,
    """xlm-roberta-large-finetuned-conll03-german""": 512,
}
class _a(PreTrainedTokenizer):
    """XLM-RoBERTa tokenizer backed by a SentencePiece BPE model.

    Token ids are the SentencePiece ids shifted by ``fairseq_offset`` (=1) so
    the first four ids match the fairseq special-token layout.

    NOTE(review): the obfuscated original destroyed the base class name, all
    class-attribute and method names, and every ``self.*`` assignment;
    structure restored to the conventional transformers layout — confirm
    against the upstream tokenization_xlm_roberta.py.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # The mask token behaves like a normal word: it absorbs the space
        # before it (lstrip) but not after.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens.
        self.fairseq_tokens_to_ids = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}

        # The first "real" token "," has position 4 in the original fairseq
        # vocab and position 3 in the spm vocab.
        self.fairseq_offset = 1

        # <mask> is appended after the whole spm vocabulary.
        self.fairseq_tokens_to_ids["""<mask>"""] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # The C++ SentencePieceProcessor is not picklable; serialize the proto.
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        state["""sp_model_proto"""] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs"""):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_a: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add special tokens: ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return 1 for special tokens, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """XLM-RoBERTa does not use token type ids: return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, applying the fairseq offset."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Convert an id back to a token (str)."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Join sub-tokens and turn SentencePiece underlines back into spaces."""
        out_string = """""".join(tokens).replace(SPIECE_UNDERLINE, """ """).strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write (or copy) the SentencePiece model file into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No file on disk: dump the in-memory serialized model instead.
            with open(out_vocab_file, """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 451
|
'''simple docstring'''
import math
def A_(number: int) -> int:
    """Return the ``number``-th Proth number (3, 5, 9, 13, 17, 25, ...).

    Proth numbers have the form k * 2**n + 1 with odd k < 2**n.  Between
    2**(n+1)+1 and 2**(n+2) there are 2**(n-1) of them, which is what the
    doubling ``increment`` per block exploits.

    Raises:
        TypeError: if ``number`` is not an integer.
        ValueError: if ``number`` < 1.
    """
    # The f-strings below reference `number`, which pins the original
    # parameter name (the obfuscated version broke it).
    if not isinstance(number, int):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)

    if number < 1:
        msg = f"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Enough doubling blocks to reach the requested index.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = A_(number)
        except ValueError:
            print(f'''ValueError: there is no {number}th Proth number''')
            continue
        print(f'''The {number}th Proth number: {value}''')
| 451
| 1
|
'''simple docstring'''
# Squared-digit sums for every 5-digit chunk, precomputed once.
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``.

    Increased speed slightly by checking every 5 digits together via the
    precomputed DIGITS_SQUARED table.
    """
    sum_of_digits_squared = 0
    while number:
        sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
        number //= 1_0_0_0_0_0
    return sum_of_digits_squared


# There are 2 chains: one ends with 89 (member 58 is seeded first, which
# minimises iterations), the other ends with 1 and has the single element 1.
# Changed dictionary to an array to quicken the solution.
CHAINS = [None] * 1000_0000
CHAINS[57] = True  # 58 -> ... -> 89
CHAINS[0] = False  # 1 stays at 1


def chain(number: int) -> bool:
    """Return True if ``number``'s square-digit chain arrives at 89.

    Memoises the result for ``number`` and, since appending zeros does not
    change the digit-square sum, for number*10, number*100, ... as well.
    """
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 1_0_0_0_0_0_0_0:
        CHAINS[number - 1] = number_chain
        number *= 1_0

    return number_chain


def solution(number: int = 1_0_0_0_0_0_0_0) -> int:
    """Project Euler 92: count starting values in [1, ``number``] whose
    square-digit chain arrives at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(True)


# NOTE(review): the obfuscation collapsed all three function names into one,
# breaking every call site; keep the collapsed name bound to the entry point.
UpperCamelCase__ = solution

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f'{solution() = }')
| 717
|
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Unit tests for the 0/1 ``knapsack`` implementation.

    NOTE(review): the obfuscated original collapsed all locals and all four
    call arguments into ``__A`` and renamed the test methods so unittest
    could not discover them; restored as ``k.knapsack(capacity, weights,
    values, counter)`` — TODO confirm against the knapsack module signature.
    """

    def test_base_case(self):
        """Zero capacity (even with a valuable item) yields 0 profit."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        """Small instance: optimal profit is 5."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        """Classic 50-capacity example: optimal profit is 220."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
| 58
| 0
|
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
lowercase_ : Dict = logging.get_logger(__name__)
# NOTE(review): the obfuscated original named all four functions `A__` (so the
# calls below to convert_classification / convert_diarization / convert_xvector
# and the __main__ call to convert_saprl_checkpoint were unresolvable) and
# turned every `model.<attr>.data = ...` weight copy into a throwaway local
# binding.  Reconstructed from the upstream s3prl conversion script — the
# target attribute names should be re-confirmed against it.


def convert_classification(base_model_name, hf_config, downstream_dict):
    """Build a UniSpeechSatForSequenceClassification and copy the s3prl
    downstream projector/classifier weights into it."""
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['projector.weight']
    model.projector.bias.data = downstream_dict['projector.bias']
    model.classifier.weight.data = downstream_dict['model.post_net.linear.weight']
    model.classifier.bias.data = downstream_dict['model.post_net.linear.bias']
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Build a UniSpeechSatForAudioFrameClassification and copy the s3prl
    downstream linear classifier weights into it."""
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict['model.linear.weight']
    model.classifier.bias.data = downstream_dict['model.linear.bias']
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Build a UniSpeechSatForXVector and copy the s3prl x-vector weights
    (connector, TDNN kernels, utterance-level linears, objective) into it."""
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['connector.weight']
    model.projector.bias.data = downstream_dict['connector.bias']
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            F'model.framelevel_feature_extractor.module.{i}.kernel.weight'
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[F'model.framelevel_feature_extractor.module.{i}.kernel.bias']

    model.feature_extractor.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
    model.feature_extractor.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
    model.classifier.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
    model.classifier.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
    model.objective.weight.data = downstream_dict['objective.W']
    return model


@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an s3prl UniSpeechSat downstream checkpoint into a Hugging
    Face model + feature extractor and save both at ``model_dump_path``."""
    checkpoint = torch.load(checkpoint_path, map_location='''cpu''')
    downstream_dict = checkpoint['''Downstream''']

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith('''ForSequenceClassification'''):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('''ForAudioFrameClassification'''):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('''ForXVector'''):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(F'S3PRL weights conversion is not supported for {arch}')

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint['''Featurizer''']['''weights''']

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
    )
    parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
    parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 64
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase__ ( a , unittest.TestCase ):
    """simple docstring"""

    # Fast (CPU-capable) tests for the Shap-E image-to-image pipeline built
    # from tiny dummy components.
    #
    # NOTE(review): an obfuscation pass broke this class: the mixin base is
    # the undefined name `a` (presumably PipelineTesterMixin, imported above),
    # every class attribute/property/method shares one junk name so they
    # shadow each other, and result bindings (e.g. the `model` returned below)
    # were turned into throwaway locals.  Restore the upstream names before
    # relying on these tests.
    lowerCAmelCase__ = ShapEImgaImgPipeline
    lowerCAmelCase__ = ["image"]
    lowerCAmelCase__ = ["image"]
    lowerCAmelCase__ = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    lowerCAmelCase__ = False

    # Dimension helpers used by the dummy components below.
    @property
    def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
        """simple docstring"""
        return 32

    @property
    def UpperCAmelCase__ ( self : str ) -> int:
        """simple docstring"""
        return 32

    @property
    def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
        """simple docstring"""
        return self.time_input_dim * 4

    @property
    def UpperCAmelCase__ ( self : Dict ) -> str:
        """simple docstring"""
        return 8

    # Tiny CLIP vision encoder (seeded for determinism).
    @property
    def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[Any]:
        """simple docstring"""
        torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
        __SCREAMING_SNAKE_CASE = CLIPVisionModel(__SCREAMING_SNAKE_CASE )
        return model

    # CLIP image processor matching the encoder above.
    @property
    def UpperCAmelCase__ ( self : Any ) -> Optional[int]:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = CLIPImageProcessor(
            crop_size=224 , do_center_crop=__SCREAMING_SNAKE_CASE , do_normalize=__SCREAMING_SNAKE_CASE , do_resize=__SCREAMING_SNAKE_CASE , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
        return image_processor

    # Tiny prior transformer (seeded for determinism).
    @property
    def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
        """simple docstring"""
        torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE = {
            """num_attention_heads""": 2,
            """attention_head_dim""": 16,
            """embedding_dim""": self.time_input_dim,
            """num_embeddings""": 32,
            """embedding_proj_dim""": self.text_embedder_hidden_size,
            """time_embed_dim""": self.time_embed_dim,
            """num_layers""": 1,
            """clip_embed_dim""": self.time_input_dim * 2,
            """additional_embeddings""": 0,
            """time_embed_act_fn""": """gelu""",
            """norm_in_type""": """layer""",
            """embedding_proj_norm_type""": """layer""",
            """encoder_hid_proj_type""": None,
            """added_emb_type""": None,
        }
        __SCREAMING_SNAKE_CASE = PriorTransformer(**__SCREAMING_SNAKE_CASE )
        return model

    # Tiny NeRF-style renderer (seeded for determinism).
    @property
    def UpperCAmelCase__ ( self : str ) -> List[str]:
        """simple docstring"""
        torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE = {
            """param_shapes""": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            """d_latent""": self.time_input_dim,
            """d_hidden""": self.renderer_dim,
            """n_output""": 12,
            """background""": (
                0.1,
                0.1,
                0.1,
            ),
        }
        __SCREAMING_SNAKE_CASE = ShapERenderer(**__SCREAMING_SNAKE_CASE )
        return model

    # Assemble the full dummy pipeline component dict.
    def UpperCAmelCase__ ( self : str ) -> Tuple:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.dummy_prior
        __SCREAMING_SNAKE_CASE = self.dummy_image_encoder
        __SCREAMING_SNAKE_CASE = self.dummy_image_processor
        __SCREAMING_SNAKE_CASE = self.dummy_renderer
        __SCREAMING_SNAKE_CASE = HeunDiscreteScheduler(
            beta_schedule="""exp""" , num_train_timesteps=1_024 , prediction_type="""sample""" , use_karras_sigmas=__SCREAMING_SNAKE_CASE , clip_sample=__SCREAMING_SNAKE_CASE , clip_sample_range=1.0 , )
        __SCREAMING_SNAKE_CASE = {
            """prior""": prior,
            """image_encoder""": image_encoder,
            """image_processor""": image_processor,
            """renderer""": renderer,
            """scheduler""": scheduler,
        }
        return components

    # Deterministic pipeline inputs (random image + seeded generator).
    def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str=0 ) -> Tuple:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 64, 64) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
        if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
            __SCREAMING_SNAKE_CASE = torch.manual_seed(__SCREAMING_SNAKE_CASE )
        else:
            __SCREAMING_SNAKE_CASE = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = {
            """image""": input_image,
            """generator""": generator,
            """num_inference_steps""": 1,
            """frame_size""": 32,
            """output_type""": """np""",
        }
        return inputs

    # One-step CPU inference must reproduce a fixed 3x3 output slice.
    def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = """cpu"""
        __SCREAMING_SNAKE_CASE = self.get_dummy_components()
        __SCREAMING_SNAKE_CASE = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = pipe.to(__SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) )
        __SCREAMING_SNAKE_CASE = output.images[0]
        __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        __SCREAMING_SNAKE_CASE = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def UpperCAmelCase__ ( self : str ) -> Dict:
        """simple docstring"""
        # Mixin helper: outputs must be consistent across batch sizes 1 and 2.
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )

    def UpperCAmelCase__ ( self : Dict ) -> str:
        """simple docstring"""
        # Mixin helper: batched vs. single inference identity (relaxed on CPU).
        __SCREAMING_SNAKE_CASE = torch_device == """cpu"""
        __SCREAMING_SNAKE_CASE = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=__SCREAMING_SNAKE_CASE , relax_max_difference=__SCREAMING_SNAKE_CASE , )

    # num_images_per_prompt must multiply the batch dimension of the output.
    def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.get_dummy_components()
        __SCREAMING_SNAKE_CASE = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = pipe.to(__SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = 1
        __SCREAMING_SNAKE_CASE = 2
        __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
        for key in inputs.keys():
            if key in self.batch_params:
                __SCREAMING_SNAKE_CASE = batch_size * [inputs[key]]
        __SCREAMING_SNAKE_CASE = pipe(**__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
    """simple docstring"""

    # Slow GPU integration test: runs the real openai/shap-e-img2img checkpoint
    # and compares against a reference numpy output.
    #
    # NOTE(review): both methods were renamed to the same junk identifier by
    # an obfuscation pass (the first one calls super().tearDown(), so it was
    # presumably tearDown), and result bindings were turned into throwaway
    # locals — restore the upstream names before running.
    def UpperCAmelCase__ ( self : int ) -> Dict:
        """simple docstring"""
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
        __SCREAMING_SNAKE_CASE = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/shap_e/test_shap_e_img2img_out.npy""" )
        __SCREAMING_SNAKE_CASE = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
        __SCREAMING_SNAKE_CASE = pipe.to(__SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
        __SCREAMING_SNAKE_CASE = pipe(
            __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        # Mean pixel difference against the stored reference frames.
        assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 627
| 0
|
"""simple docstring"""
from __future__ import annotations
def lowercase_(lst: list[int]) -> int:
    """Return a peak of ``lst`` (an element strictly greater than both
    neighbours) by divide and conquer.

    Looks at the middle three elements; if the middle one is not a peak,
    recurses into the half that is still increasing.  Assumes the list has
    at least three elements around the midpoint.

    NOTE(review): the original recursed via the undefined name ``peak`` (the
    def had been renamed by an obfuscation pass); the recursion now calls
    this function itself.
    """
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return lowercase_(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return lowercase_(lst[:m])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 292
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import machinery for the MVP sub-package: `_import_structure` maps each
# sub-module name to the public names it provides; optional entries are added
# only when their backend (tokenizers / torch) is available.
#
# NOTE(review): the obfuscation collapsed `_import_structure` bindings and the
# final `sys.modules[__name__] = ...` assignment into throwaway names, so the
# lazy module was never installed; reconstructed per the standard transformers
# __init__ pattern.
_import_structure = {
    """configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""],
    """tokenization_mvp""": ["""MvpTokenizer"""],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_mvp_fast"""] = ["""MvpTokenizerFast"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_mvp"""] = [
        """MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """MvpForCausalLM""",
        """MvpForConditionalGeneration""",
        """MvpForQuestionAnswering""",
        """MvpForSequenceClassification""",
        """MvpModel""",
        """MvpPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy backends are
    # only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 292
| 1
|
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
snake_case__ : Optional[int] = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
# Seed everything so the sub-tests are reproducible.
set_seed(4_2)

# Tiny wav2vec2 checkpoints exercised by the tests, keyed by flavour, and the
# DeepSpeed ZeRO stages under test.
# Fixes: the original bound every value to one repeatedly-overwritten name, so
# `models` and `stages` (referenced by the test class and by the params product
# below) and the second ZeRO constant were undefined.
models = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
ZEROa = 'zero2'
ZEROb = 'zero3'
stages = [ZEROa, ZEROb]
def lowerCamelCase__(func, param_num, param) -> Optional[Any]:
    """Build a sub-test name containing *all* parameters of the sub-test.

    By default `parameterized` shows only the first parameter in the generated test
    name; here every argument is joined into the name. `param_num` is unused but
    required by the `name_func` calling convention.

    Fixes: the original signature repeated one parameter name three times (a
    SyntaxError) and the body referenced undefined names.
    """
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return F"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with models to test
snake_case__ : str = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class _a(a__):
    """DeepSpeed integration tests for the wav2vec2 `run_asr.py` example script.

    NOTE(review): this class is heavily name-mangled — the base `a__` and the
    decorator/body argument `__a` are not defined in the visible file, every test
    method shares the name `SCREAMING_SNAKE_CASE` (later definitions shadow earlier
    ones), several signatures repeat the parameter name `_snake_case`, and locals are
    bound to `_UpperCAmelCase` but read back under other names — confirm against the
    original test module before relying on it.
    """

    @parameterized.expand(__a, name_func=__a)
    def SCREAMING_SNAKE_CASE(self, _snake_case, _snake_case):
        # fp16, non-distributed run per (stage, model) pair.
        self.run_and_check(
            stage=__a, model=__a, distributed=__a, fpaa=__a, )

    @require_torch_multi_gpu
    @parameterized.expand(__a, name_func=__a)
    def SCREAMING_SNAKE_CASE(self, _snake_case, _snake_case):
        # Same matrix, distributed across multiple GPUs.
        self.run_and_check(
            stage=__a, model=__a, distributed=__a, fpaa=__a, )

    @parameterized.expand(__a, name_func=__a)
    def SCREAMING_SNAKE_CASE(self, _snake_case, _snake_case):
        self.run_and_check(
            stage=__a, model=__a, distributed=__a, fpaa=__a, )

    @require_torch_multi_gpu
    @parameterized.expand(__a, name_func=__a)
    def SCREAMING_SNAKE_CASE(self, _snake_case, _snake_case):
        self.run_and_check(
            stage=__a, model=__a, distributed=__a, fpaa=__a, )

    def SCREAMING_SNAKE_CASE(self, _snake_case):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def SCREAMING_SNAKE_CASE(self, _snake_case, _snake_case, _snake_case=10, _snake_case=True, _snake_case=True, _snake_case=True, ):
        # Launch one training run and verify it completed.
        _UpperCAmelCase = models[model]
        _UpperCAmelCase = self.run_trainer(
            stage=__a, model_name=__a, eval_steps=__a, num_train_epochs=1, distributed=__a, fpaa=__a, )
        self.do_checks(__a)
        return output_dir

    def SCREAMING_SNAKE_CASE(self, _snake_case, _snake_case, _snake_case=10, _snake_case=1, _snake_case=True, _snake_case=True, ):
        # Build the `deepspeed <script> <args>` command line and execute it.
        _UpperCAmelCase = self.get_auto_remove_tmp_dir("./xxx", after=__a)
        _UpperCAmelCase = F"\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(__a )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n ".split()
        if fpaa:
            args.extend(["--fp16"])
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        _UpperCAmelCase = F"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        _UpperCAmelCase = [F"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        _UpperCAmelCase = self.get_launcher(__a)
        _UpperCAmelCase = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(__a, env=self.get_env())
        return output_dir

    def SCREAMING_SNAKE_CASE(self, _snake_case=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        _UpperCAmelCase = min(2, get_gpu_count()) if distributed else 1
        return F"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 408
|
class SCREAMING_SNAKE_CASE_:
    """Levenshtein edit distance between two words, computed two ways:
    top-down memoized recursion and bottom-up dynamic programming.

    Fixes: the original block gave all three methods one shared name (later
    definitions shadowed earlier ones, and the main guard below calls
    `min_dist_top_down` / `min_dist_bottom_up`), repeated parameter names in the
    signatures (a SyntaxError), stored both words under `worda`, and called the
    never-defined `self.__min_dist_top_down_dp`.
    """

    def __init__(self) -> None:
        # The two words being compared and the shared memo/DP table.
        self.worda = ""
        self.wordb = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        """Memoized recursion over the prefixes worda[: m + 1] and wordb[: n + 1]."""
        if m == -1:
            # worda prefix empty: insert the remaining n + 1 characters.
            return n + 1
        elif n == -1:
            # wordb prefix empty: delete the remaining m + 1 characters.
            return m + 1
        elif self.dp[m][n] > -1:
            # Already memoized.
            return self.dp[m][n]
        else:
            if self.worda[m] == self.wordb[n]:
                # Last characters match: no extra edit needed.
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, worda: str, wordb: str) -> int:
        """Edit distance via memoized recursion."""
        self.worda = worda
        self.wordb = wordb
        # -1 marks "not computed yet".
        self.dp = [[-1 for _ in range(len(wordb))] for _ in range(len(worda))]
        return self.__min_dist_top_down_dp(len(worda) - 1, len(wordb) - 1)

    def min_dist_bottom_up(self, worda: str, wordb: str) -> int:
        """Edit distance via an iterative (m + 1) x (n + 1) DP table."""
        self.worda = worda
        self.wordb = wordb
        m = len(worda)
        n = len(wordb)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif worda[i - 1] == wordb[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
# Interactive demo: read two words and print their edit distance both ways.
# Fixes: the original instantiated the undefined name `EditDistance` (the class in
# this file is `SCREAMING_SNAKE_CASE_`), bound both inputs to one overwritten
# variable, and passed the same string twice to each solver call.
if __name__ == "__main__":
    solver = SCREAMING_SNAKE_CASE_()
    print('****************** Testing Edit Distance DP Algorithm ******************')
    print()
    Sa = input('Enter the first string: ').strip()
    Sb = input('Enter the second string: ').strip()
    print()
    print(F'The minimum edit distance is: {solver.min_dist_top_down(Sa, Sb)}')
    print(F'The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sb)}')
    print()
    print('*************** End of Testing Edit Distance DP Algorithm ***************')
| 278
| 0
|
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__(UpperCAmelCase_, unittest.TestCase):
    """Tests for the BLOOM fast tokenizer.

    NOTE(review): the base `UpperCAmelCase_` is not defined in the visible file
    (presumably a tokenizer-test mixin), every method below shares the name
    `lowerCamelCase__` (later definitions shadow earlier ones), and locals are bound
    to `lowercase_` but read back under other names — confirm against the original
    test module before relying on it.
    """

    SCREAMING_SNAKE_CASE_ : List[str] = None
    SCREAMING_SNAKE_CASE_ : Tuple = BloomTokenizerFast
    SCREAMING_SNAKE_CASE_ : List[str] = BloomTokenizerFast
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
    SCREAMING_SNAKE_CASE_ : Optional[int] = False
    SCREAMING_SNAKE_CASE_ : Dict = 'tokenizer_file'
    SCREAMING_SNAKE_CASE_ : List[str] = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}

    def lowerCamelCase__(self) -> List[Any]:
        # Download the reference BLOOM tokenizer once and save it to the temp dir.
        super().setUp()
        lowercase_ : int = BloomTokenizerFast.from_pretrained('bigscience/tokenizer')
        tokenizer.save_pretrained(self.tmpdirname)

    def lowerCamelCase__(self, **_lowercase) -> Any:
        # Build a tokenizer from the temp dir, merging in the special-token map.
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **_lowercase)

    def lowerCamelCase__(self) -> Any:
        # Encode/decode round-trip on two short sequences with hard-coded ids.
        lowercase_ : Optional[int] = self.get_rust_tokenizer()
        lowercase_ : Any = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
        lowercase_ : Dict = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
        lowercase_ : List[Any] = tokenizer.batch_encode_plus(_lowercase)['input_ids']
        self.assertListEqual(_lowercase, _lowercase)
        lowercase_ : Optional[Any] = tokenizer.batch_decode(_lowercase)
        self.assertListEqual(_lowercase, _lowercase)

    def lowerCamelCase__(self, _lowercase=6) -> Any:
        # Padding behaviour: encoding with max_length must not raise, while
        # padding='max_length' with pad_token=None must raise ValueError.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                lowercase_ : int = self.rust_tokenizer_class.from_pretrained(_lowercase, **_lowercase)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                lowercase_ : Dict = 'This is a simple input'
                lowercase_ : List[str] = ['This is a simple input 1', 'This is a simple input 2']
                lowercase_ : List[Any] = ('This is a simple input', 'This is a pair')
                lowercase_ : List[str] = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(_lowercase, max_length=_lowercase)
                    tokenizer_r.encode_plus(_lowercase, max_length=_lowercase)
                    tokenizer_r.batch_encode_plus(_lowercase, max_length=_lowercase)
                    tokenizer_r.encode(_lowercase, max_length=_lowercase)
                    tokenizer_r.batch_encode_plus(_lowercase, max_length=_lowercase)
                except ValueError:
                    self.fail('Bloom Tokenizer should be able to deal with padding')
                lowercase_ : List[Any] = None  # Hotfixing padding = None
                self.assertRaises(_lowercase, tokenizer_r.encode, _lowercase, max_length=_lowercase, padding='max_length')
                # Simple input
                self.assertRaises(_lowercase, tokenizer_r.encode_plus, _lowercase, max_length=_lowercase, padding='max_length')
                # Simple input
                self.assertRaises(
                    _lowercase, tokenizer_r.batch_encode_plus, _lowercase, max_length=_lowercase, padding='max_length', )
                # Pair input
                self.assertRaises(_lowercase, tokenizer_r.encode, _lowercase, max_length=_lowercase, padding='max_length')
                # Pair input
                self.assertRaises(_lowercase, tokenizer_r.encode_plus, _lowercase, max_length=_lowercase, padding='max_length')
                # Pair input
                self.assertRaises(
                    _lowercase, tokenizer_r.batch_encode_plus, _lowercase, max_length=_lowercase, padding='max_length', )

    def lowerCamelCase__(self) -> int:
        # Encode/decode round-trip over one XNLI sample in every language.
        lowercase_ : List[str] = self.get_rust_tokenizer()
        lowercase_ : Dict = load_dataset('xnli', 'all_languages', split='test', streaming=_lowercase)
        lowercase_ : Optional[Any] = next(iter(_lowercase))['premise']  # pick up one data
        lowercase_ : Any = list(sample_data.values())
        lowercase_ : int = list(map(tokenizer.encode, _lowercase))
        lowercase_ : Optional[Any] = [tokenizer.decode(_lowercase, clean_up_tokenization_spaces=_lowercase) for x in output_tokens]
        self.assertListEqual(_lowercase, _lowercase)

    def lowerCamelCase__(self) -> int:
        # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positoonal embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 718
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration file.
A: Dict = logging.get_logger(__name__)
# Map of pretrained ViT checkpoints to their hosted config files.
# NOTE(review): both statements bind `A`, so the logger is overwritten by the URL
# map — confirm the intended names against the original module.
A: Optional[Any] = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class __magic_name__(PretrainedConfig):
    """Configuration for a ViT-style vision Transformer.

    Defaults reproduce the ViT-base architecture: 12 layers, 12 heads, hidden size
    768, 224x224 images split into 16x16 patches.

    Fixes: the original inherited the undefined name `UpperCAmelCase_` (while
    `PretrainedConfig` is imported above), repeated one parameter name throughout the
    signature (a SyntaxError), and bound the `model_type` registry key to an
    obfuscated attribute name.
    """

    # Identifier used by the transformers config registry.
    model_type = 'vit'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ) -> None:
        """Store the architecture hyper-parameters; extra kwargs go to the base class."""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        # Stride of the decoder used for masked-image-modeling style heads.
        self.encoder_stride = encoder_stride
class __magic_name__(OnnxConfig):
    """ONNX export configuration for ViT models.

    Fixes: the original inherited the undefined name `UpperCAmelCase_` (while
    `OnnxConfig` is imported above) and defined both properties under one shared
    name, so the second definition shadowed the first.
    """

    # Minimum torch version supporting this export.
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Input spec: a single `pixel_values` tensor with fully dynamic axes."""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model's outputs."""
        return 1E-4
| 7
| 0
|
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
# Seed everything so the FSDP plugin tests are deterministic.
set_seed(42)

# Checkpoint used by the auto-wrap-policy test, and the mixed-precision dtype
# flags exercised by the test class below.
# Fixes: the original bound all five values to one repeatedly-overwritten name, so
# `FPaa`, `BFaa` and `dtypes` (all referenced by the test class) were undefined.
UpperCamelCase = "bert-base-cased"
FPaa = "fp16"
BFaa = "bf16"
dtypes = [FPaa, BFaa]
@require_fsdp
@require_cuda
class lowerCAmelCase_(lowercase):
    """Unit tests: FullyShardedDataParallelPlugin fields parsed from FSDP_* env vars.

    NOTE(review): the base `lowercase` and the name `lowerCamelCase__` (used
    throughout the bodies) are not defined in the visible file, all methods share
    the name `__a` (later definitions shadow earlier ones), and locals are bound to
    `UpperCamelCase__` but read back under other names — confirm against the
    original accelerate test module before relying on it.
    """

    def __a(self: List[str]):
        super().setUp()
        # Minimal single-process environment with FSDP enabled.
        UpperCamelCase__: str = dict(
            ACCELERATE_USE_FSDP="""true""", MASTER_ADDR="""localhost""", MASTER_PORT="""10999""", RANK="""0""", LOCAL_RANK="""0""", WORLD_SIZE="""1""", )

    def __a(self: Union[str, Any]):
        # FSDP_SHARDING_STRATEGY env value maps onto torch's ShardingStrategy enum.
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(lowerCamelCase__):
            UpperCamelCase__: Optional[int] = self.dist_env.copy()
            UpperCamelCase__: List[Any] = f"""{i + 1}"""
            UpperCamelCase__: List[Any] = strategy
            with mockenv_context(**lowerCamelCase__):
                UpperCamelCase__: Tuple = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))

    def __a(self: Union[str, Any]):
        # FSDP_BACKWARD_PREFETCH maps onto BackwardPrefetch; "NO_PREFETCH" means None.
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(lowerCamelCase__):
            UpperCamelCase__: Optional[int] = self.dist_env.copy()
            UpperCamelCase__: Optional[int] = prefetch_policy
            with mockenv_context(**lowerCamelCase__):
                UpperCamelCase__: Dict = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))

    def __a(self: Optional[Any]):
        # FSDP_STATE_DICT_TYPE maps onto StateDictType; FULL_STATE_DICT also implies
        # CPU offload and rank0-only saving.
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(lowerCamelCase__):
            UpperCamelCase__: Optional[int] = self.dist_env.copy()
            UpperCamelCase__: Tuple = state_dict_type
            with mockenv_context(**lowerCamelCase__):
                UpperCamelCase__: List[str] = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.ranka_only)

    def __a(self: List[str]):
        # Auto-wrap policy selection per FSDP_AUTO_WRAP_POLICY, plus the error paths
        # (unknown transformer layer class, zero min_num_params).
        UpperCamelCase__: List[Any] = AutoModel.from_pretrained(lowerCamelCase__)
        for policy in FSDP_AUTO_WRAP_POLICY:
            UpperCamelCase__: Optional[int] = self.dist_env.copy()
            UpperCamelCase__: int = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                UpperCamelCase__: Optional[Any] = """BertLayer"""
            elif policy == "SIZE_BASED_WRAP":
                UpperCamelCase__: Union[str, Any] = """2000"""
            with mockenv_context(**lowerCamelCase__):
                UpperCamelCase__: int = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(lowerCamelCase__)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)
        UpperCamelCase__: Optional[int] = self.dist_env.copy()
        UpperCamelCase__: str = """TRANSFORMER_BASED_WRAP"""
        UpperCamelCase__: Union[str, Any] = """T5Layer"""
        with mockenv_context(**lowerCamelCase__):
            UpperCamelCase__: Any = FullyShardedDataParallelPlugin()
            with self.assertRaises(lowerCamelCase__) as cm:
                fsdp_plugin.set_auto_wrap_policy(lowerCamelCase__)
            self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception))
        UpperCamelCase__: Dict = self.dist_env.copy()
        UpperCamelCase__: int = """SIZE_BASED_WRAP"""
        UpperCamelCase__: Union[str, Any] = """0"""
        with mockenv_context(**lowerCamelCase__):
            UpperCamelCase__: Optional[Any] = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(lowerCamelCase__)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)

    def __a(self: Optional[Any]):
        # Mixed-precision policy from the env dtype; fp16 additionally enables a
        # sharded gradient scaler, bf16 leaves the scaler unset.
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            UpperCamelCase__: Dict = self.dist_env.copy()
            UpperCamelCase__: Dict = mp_dtype
            with mockenv_context(**lowerCamelCase__):
                UpperCamelCase__: Optional[Any] = Accelerator()
                if mp_dtype == "fp16":
                    UpperCamelCase__: Tuple = torch.floataa
                elif mp_dtype == "bf16":
                    UpperCamelCase__: Tuple = torch.bfloataa
                UpperCamelCase__: int = MixedPrecision(param_dtype=lowerCamelCase__, reduce_dtype=lowerCamelCase__, buffer_dtype=lowerCamelCase__)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, lowerCamelCase__)
                if mp_dtype == FPaa:
                    self.assertTrue(isinstance(accelerator.scaler, lowerCamelCase__))
                elif mp_dtype == BFaa:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(lowerCamelCase__)

    def __a(self: Optional[Any]):
        # FSDP_OFFLOAD_PARAMS toggles CPUOffload.
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            UpperCamelCase__: List[str] = self.dist_env.copy()
            UpperCamelCase__: Dict = str(lowerCamelCase__).lower()
            with mockenv_context(**lowerCamelCase__):
                UpperCamelCase__: List[str] = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=lowerCamelCase__))
@require_fsdp
@require_multi_gpu
@slow
class lowerCAmelCase_(lowercase):
    """Integration tests: launch FSDP training scripts via `accelerate launch` and
    check performance, checkpointing and peak-memory bounds.

    NOTE(review): as in the class above, the base `lowercase` and the name
    `lowerCamelCase__` are not defined in the visible file, all methods share the
    name `__a`, and locals are bound to `UpperCamelCase__` but read back under other
    names — confirm against the original accelerate test module.
    """

    def __a(self: Dict):
        super().setUp()
        # Accuracy lower bound, FSDP configurations under test, per-config peak
        # memory bounds (MB), dataset sizes, and the external test-script folder.
        UpperCamelCase__: str = 0.82
        UpperCamelCase__: int = [
            """fsdp_shard_grad_op_transformer_based_wrap""",
            """fsdp_full_shard_transformer_based_wrap""",
        ]
        UpperCamelCase__: int = {
            """multi_gpu_fp16""": 32_00,
            """fsdp_shard_grad_op_transformer_based_wrap_fp16""": 20_00,
            """fsdp_full_shard_transformer_based_wrap_fp16""": 19_00,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
        }
        UpperCamelCase__: Optional[Any] = 1_60
        UpperCamelCase__: List[str] = 1_60
        UpperCamelCase__: Union[str, Any] = inspect.getfile(accelerate.test_utils)
        UpperCamelCase__: Dict = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["""scripts""", """external_deps"""])

    def __a(self: str):
        # Run test_performance.py for every FSDP config and check the accuracy bound.
        UpperCamelCase__: int = os.path.join(self.test_scripts_folder, """test_performance.py""")
        UpperCamelCase__: List[str] = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""]
        for config in self.performance_configs:
            UpperCamelCase__: Optional[Any] = cmd.copy()
            for i, strategy in enumerate(lowerCamelCase__):
                if strategy.lower() in config:
                    cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""")
                    break
            if "fp32" in config:
                cmd_config.append("""--mixed_precision=no""")
            else:
                cmd_config.append("""--mixed_precision=fp16""")
            if "cpu_offload" in config:
                cmd_config.append("""--fsdp_offload_params=True""")
            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""")
                    break
            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("""--fsdp_min_num_params=2000""")
            cmd_config.extend(
                [
                    self.test_file_path,
                    f"""--output_dir={self.tmpdir}""",
                    f"""--performance_lower_bound={self.performance_lower_bound}""",
                ] )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(lowerCamelCase__, env=os.environ.copy())

    def __a(self: str):
        # Run test_checkpointing.py for FULL_SHARD with every state-dict type, then
        # resume from the saved epoch_0 checkpoint.
        UpperCamelCase__: List[Any] = os.path.join(self.test_scripts_folder, """test_checkpointing.py""")
        UpperCamelCase__: Any = [
            """accelerate""",
            """launch""",
            """--num_processes=2""",
            """--num_machines=1""",
            """--machine_rank=0""",
            """--use_fsdp""",
            """--mixed_precision=fp16""",
            """--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
        ]
        for i, strategy in enumerate(lowerCamelCase__):
            UpperCamelCase__: Optional[Any] = cmd.copy()
            cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""")
            if strategy != "FULL_SHARD":
                continue
            UpperCamelCase__: Optional[int] = len(lowerCamelCase__)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                UpperCamelCase__: Tuple = cmd_config[:state_dict_config_index]
                cmd_config.append(f"""--fsdp_state_dict_type={state_dict_type}""")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"""--output_dir={self.tmpdir}""",
                        """--partial_train_epoch=1""",
                    ] )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(lowerCamelCase__, env=os.environ.copy())
                UpperCamelCase__: List[Any] = cmd_config[:-1]
                UpperCamelCase__: Tuple = os.path.join(self.tmpdir, """epoch_0""")
                cmd_config.extend(
                    [
                        f"""--resume_from_checkpoint={resume_from_checkpoint}""",
                    ] )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(lowerCamelCase__, env=os.environ.copy())

    def __a(self: List[str]):
        # Run test_peak_memory_usage.py for each config against its memory bound.
        UpperCamelCase__: List[str] = os.path.join(self.test_scripts_folder, """test_peak_memory_usage.py""")
        UpperCamelCase__: Optional[int] = [
            """accelerate""",
            """launch""",
            """--num_processes=2""",
            """--num_machines=1""",
            """--machine_rank=0""",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            UpperCamelCase__: Optional[int] = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["""--mixed_precision=fp16"""])
            else:
                cmd_config.extend(["""--mixed_precision=no"""])
            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["""--use_fsdp"""])
                for i, strategy in enumerate(lowerCamelCase__):
                    if strategy.lower() in spec:
                        cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""")
                        break
                if "cpu_offload" in spec:
                    cmd_config.append("""--fsdp_offload_params=True""")
                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""")
                        break
                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("""--fsdp_min_num_params=2000""")
            cmd_config.extend(
                [
                    self.test_file_path,
                    f"""--output_dir={self.tmpdir}""",
                    f"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
                    f"""--n_train={self.n_train}""",
                    f"""--n_val={self.n_val}""",
                ] )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(lowerCamelCase__, env=os.environ.copy())
| 45
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __A(PipelineTool):
    """Tool that produces an English caption for an input image using a BLIP
    vision-to-sequence model.

    Fixes: the original inherited the undefined name `a` (while `PipelineTool` is
    imported above), bound every class attribute to one repeatedly-overwritten
    name, repeated `_snake_case` in `__init__` (a SyntaxError), and defined all
    three pipeline steps under one shared method name.
    """

    default_checkpoint = '''Salesforce/blip-image-captioning-base'''
    description = (
        '''This is a tool that generates a description of an image. It takes an input named `image` which should be the '''
        '''image to caption, and returns a text that contains the description in English.'''
    )
    name = '''image_captioner'''
    model_class = AutoModelForVisionaSeq
    inputs = ['''image''']
    outputs = ['''text''']

    def __init__(self, *args, **kwargs) -> None:
        '''Require the `vision` extra (PIL) before constructing the tool.'''
        requires_backends(self, ['''vision'''])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image.Image"):
        '''Preprocess the input image into model tensors.'''
        return self.pre_processor(images=image, return_tensors='''pt''')

    def forward(self, inputs):
        '''Run caption generation on the preprocessed inputs.'''
        return self.model.generate(**inputs)

    def decode(self, outputs) -> str:
        '''Decode generated token ids into a plain-text caption.'''
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
| 219
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
# Module-level logger.
_A: Optional[Any] = logging.get_logger(__name__)

# On-disk file names and per-checkpoint hosted files / sizes / init options.
# Fixes: the original bound all four maps to `_A` (each overwriting the last),
# while the tokenizer class below references VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and
# PRETRAINED_INIT_CONFIGURATION — which were therefore undefined.
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
        '''distilbert-base-uncased-distilled-squad''': (
            '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
        ),
        '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
        '''distilbert-base-cased-distilled-squad''': (
            '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
        ),
        '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
        '''distilbert-base-multilingual-cased''': (
            '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
        '''distilbert-base-uncased-distilled-squad''': (
            '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
        ),
        '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
        '''distilbert-base-cased-distilled-squad''': (
            '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
        ),
        '''distilbert-base-german-cased''': (
            '''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
        ),
        '''distilbert-base-multilingual-cased''': (
            '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
        ),
    },
}

# Maximum input lengths supported by each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''distilbert-base-uncased''': 512,
    '''distilbert-base-uncased-distilled-squad''': 512,
    '''distilbert-base-cased''': 512,
    '''distilbert-base-cased-distilled-squad''': 512,
    '''distilbert-base-german-cased''': 512,
    '''distilbert-base-multilingual-cased''': 512,
}

# Default constructor options per checkpoint.
PRETRAINED_INIT_CONFIGURATION = {
    '''distilbert-base-uncased''': {'''do_lower_case''': True},
    '''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
    '''distilbert-base-cased''': {'''do_lower_case''': False},
    '''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
    '''distilbert-base-german-cased''': {'''do_lower_case''': False},
    '''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class lowerCamelCase__(PreTrainedTokenizerFast):
    '''A "fast" DistilBERT tokenizer backed by the `tokenizers` library (WordPiece).

    Fixes: the original inherited the undefined name `A` (while
    `PreTrainedTokenizerFast` is imported above), bound every class attribute to
    `A_` (each overwriting the last), repeated parameter names in the `__init__`
    signature (a SyntaxError), and defined all three methods under one mangled
    shared name.
    '''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ) -> None:
        '''Load the backend tokenizer and re-sync its normalizer options when they
        differ from the requested casing/accent/CJK settings.'''
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            # Rebuild the normalizer with the requested options.
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''Wrap one or two sequences with `[CLS]`/`[SEP]` special tokens.'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        '''Return token-type ids: all zeros for a single sequence, zeros for the
        first segment and ones for the second in a pair.'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''Save the backend tokenizer model files into *save_directory*.'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
# --- stray extraction artifact removed (fragment separator) ---
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Dict =logging.get_logger(__name__)
_A : Dict ={
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__ ( A ):
    """Configuration class for Megatron-BERT models.

    Holds the hyper-parameters defining the architecture. Bug fixes: the
    original `__init__` repeated one parameter name for every argument
    (SyntaxError) and bound every value to a throwaway local instead of an
    instance attribute; `model_type` restored (the config machinery keys on it).
    """

    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
# --- stray extraction artifact removed (fragment separator) ---
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class SCREAMING_SNAKE_CASE(ModelMixin, ConfigMixin):
    """FiLM-conditioned T5-style decoder denoising continuous spectrogram frames.

    Restored from the scrambled original: bases were the undefined
    ``__snake_case`` (the file imports ModelMixin/ConfigMixin for this),
    every ``self.<attr> = ...`` had been collapsed to a throwaway local, the
    methods had lost the names their call sites use (``encoder_decoder_mask``),
    and all parameters in ``__init__`` shared a single name (SyntaxError).
    """

    @register_to_config
    def __init__(
        self,
        input_dims=128,
        targets_length=256,
        max_decoder_noise_time=2000.0,
        d_model=768,
        num_layers=12,
        num_heads=12,
        d_kv=64,
        d_ff=2048,
        dropout_rate=0.1,
    ):
        super().__init__()
        # Maps the timestep embedding to the (4 * d_model) FiLM conditioning size.
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )
        self.position_encoding = nn.Embedding(targets_length, d_model)
        # Fixed (non-learned) positional table.
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for _ in range(num_layers):
            # FiLM conditional T5 decoder block
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        """Outer product of the two 0/1 masks, with a broadcastable head axis."""
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        """Denoise *decoder_input_tokens* conditioned on encoder outputs and a noise time in [0, 1)."""
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to the expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)
        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want relative positions for audio context, we could offset this
        # sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )
        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)
        # decoder: no padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [
            (x, self.encoder_decoder_mask(decoder_mask, y_mask)) for x, y_mask in encodings_and_masks
        ]
        # Cross-attend style: concatenate all encodings along the sequence axis.
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]
        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    """One T5-style decoder block: conditional self-attention, cross-attention,
    and a FiLM-conditioned feed-forward layer.

    Renamed from the scrambled ``SCREAMING_SNAKE_CASE`` to the name its caller
    uses; duplicate parameter names fixed and sub-layers stored on ``self``.
    """

    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )
        # FiLM-conditioned MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )
        if encoder_hidden_states is not None:
            # Turn the 0/1 mask into additive attention bias (0 keep, -1e10 drop).
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )
            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )
        # Apply FiLM-conditioned feed-forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    """Self-attention with optional FiLM conditioning applied after the norm.

    Renamed from ``SCREAMING_SNAKE_CASE`` to the name its caller uses;
    duplicate parameter names fixed and sub-modules stored on ``self``.
    """

    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block with residual connection.
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class TaLayerCrossAttention(nn.Module):
    """Pre-norm cross-attention with residual connection.

    Renamed from ``SCREAMING_SNAKE_CASE`` to the name its caller uses;
    duplicate parameter names fixed and sub-modules stored on ``self``.
    """

    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        # The head axis added by encoder_decoder_mask is dropped for Attention.
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    """Pre-norm feed-forward block with optional FiLM conditioning.

    Renamed from ``SCREAMING_SNAKE_CASE`` to the name its caller uses;
    duplicate parameter names fixed and sub-modules stored on ``self``.
    """

    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    """T5 gated-GELU feed-forward: gelu(wi_0(x)) * wi_1(x) -> dropout -> wo.

    Renamed from ``SCREAMING_SNAKE_CASE`` to the name its caller uses. Bug
    fixed: the scramble collapsed ``wi_0`` and ``wi_1`` into a single name, so
    the gate and the linear branch shared one projection.
    """

    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    """T5-style RMS layer norm: no mean subtraction, no bias.

    Renamed from ``SCREAMING_SNAKE_CASE`` to the name its callers use;
    ``weight``/``variance_epsilon`` restored as instance attributes (the
    scramble bound them to throwaway locals) and ``keepdim=True`` restored.
    """

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Variance is always computed in float32 for numerical stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # Convert back to half precision if the weights are half precision.
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    """tanh approximation of GELU (as used by Google BERT / GPT-2).

    Renamed from ``SCREAMING_SNAKE_CASE`` to the name its caller uses; the
    method is named ``forward`` so ``nn.Module.__call__`` dispatches to it.
    """

    def forward(self, input):
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    """Feature-wise linear modulation: x * (1 + scale) + shift, with scale and
    shift projected from a conditioning embedding.

    Renamed from ``SCREAMING_SNAKE_CASE`` to the name its callers use;
    duplicate parameter names fixed and ``scale_bias`` stored on ``self``.
    """

    def __init__(self, in_features, out_features):
        super().__init__()
        # Produces scale and shift in one projection (hence out_features * 2).
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
# --- stray extraction artifact removed (fragment separator) ---
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration class for GPT-Neo models.

    Restored from the scrambled original: the base was the undefined
    ``__snake_case`` (the file imports PretrainedConfig for this); the three
    class attributes all shared the name ``__A`` (mutually shadowing); and the
    static helper had lost the ``expand_attention_types_params`` name that
    ``__init__`` calls it by.
    """

    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],  # mirrors upstream default; not mutated
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.attention_layers)` == `config.num_layers` '
                f"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """
                f"""`config.num_layers = {self.num_layers}`. """
                '`config.attention_layers` is prepared using `config.attention_types`. '
                'Please verify the value of `config.attention_types` argument.' )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        """Expand [[patterns, repeat], ...] into a flat per-layer pattern list."""
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def a(input, dimension, size, step):
    """Custom ``torch.Tensor.unfold`` replacement (ONNX-exportable).

    Extracts sliding windows of length *size* with stride *step* along
    *dimension*, moving the window axis to the end like ``Tensor.unfold``.
    Bug fixed: the scramble turned ``slice(None)`` into a call with an
    argument, and collapsed all four parameter names into one (SyntaxError).
    """
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode='floor') + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    # Move the new window axis to the end, matching Tensor.unfold.
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def a(seq_length, window_size):
    """Return (largest divisor of *seq_length* below *window_size*, number of blocks).

    Used to split a sequence into equal local-attention blocks. Duplicate
    parameter names fixed; the final division restored to
    ``seq_length // largest_divisor``.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode='floor')
class SCREAMING_SNAKE_CASE(OnnxConfigWithPast):
    """ONNX export configuration for GPT-Neo.

    Restored from the scrambled original: the base was the undefined
    ``__snake_case`` (the file imports OnnxConfigWithPast); the overridden
    hooks had lost their API names (``inputs``, ``num_attention_heads``,
    ``generate_dummy_inputs``, ``default_onnx_opset``); and the
    attention-mask dict entries were bound to throwaway locals instead of
    being stored in the returned mappings.
    """

    @property
    def inputs(self):
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def num_attention_heads(self):
        return self._config.num_heads

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        common_inputs = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the inputs the way they appear in the model's forward().
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            # Extend the mask to cover the (dummy) past positions as well.
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self):
        return 13
# --- stray extraction artifact removed (fragment separator) ---
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import structure: maps each submodule to the public names it provides.
# Bug fixes: the dict was previously bound to a throwaway name while the
# _LazyModule call below referenced `_import_structure`; the modeling list was
# never stored in the structure; and the lazy module was assigned to a local
# variable instead of replacing this module in sys.modules.
_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# --- stray extraction artifact removed (fragment separator) ---
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
def snake_case_ ( self : Optional[int] ):
__lowercase : Dict = tempfile.mkdtemp()
__lowercase : Tuple = 8
# DPR tok
__lowercase : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__lowercase : Optional[Any] = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(_snake_case , exist_ok=_snake_case )
__lowercase : str = os.path.join(_snake_case , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
__lowercase : List[str] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__lowercase : Optional[Any] = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
__lowercase : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__lowercase : Any = {'''unk_token''': '''<unk>'''}
__lowercase : int = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(_snake_case , exist_ok=_snake_case )
__lowercase : List[Any] = os.path.join(_snake_case , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase : Tuple = os.path.join(_snake_case , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_snake_case ) )
def snake_case_ ( self : List[Any] ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def snake_case_ ( self : Dict ):
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def snake_case_ ( self : Optional[int] ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def snake_case_ ( self : str ):
shutil.rmtree(self.tmpdirname )
def snake_case_ ( self : Tuple ):
__lowercase : Dict = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def snake_case_ ( self : Union[str, Any] ):
__lowercase : Union[str, Any] = self.get_dummy_dataset()
__lowercase : Optional[int] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__lowercase : List[Any] = dataset
__lowercase : str = RagRetriever(
_snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def snake_case_ ( self : int , _snake_case : bool ):
__lowercase : Dict = self.get_dummy_dataset()
__lowercase : List[Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
__lowercase : List[Any] = os.path.join(self.tmpdirname , '''dataset''' )
__lowercase : Union[str, Any] = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
__lowercase : Optional[Any] = RagRetriever(
_snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
__lowercase : Optional[Any] = RagRetriever(
_snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , _snake_case ) , )
return retriever
def snake_case_ ( self : str ):
__lowercase : List[str] = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
__lowercase : Optional[int] = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
__lowercase : str = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
__lowercase : Dict = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(_snake_case , open(_snake_case , '''wb''' ) )
__lowercase : List[str] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
__lowercase : Optional[int] = RagRetriever(
_snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def snake_case_ ( self : Optional[Any] ):
__lowercase : List[str] = 1
__lowercase : Tuple = self.get_dummy_canonical_hf_index_retriever()
__lowercase : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowercase , __lowercase , __lowercase : str = retriever.retrieve(_snake_case , n_docs=_snake_case )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_snake_case ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _snake_case )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def snake_case_ ( self : int ):
__lowercase : int = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__lowercase : Optional[Any] = self.get_dummy_dataset()
retriever.save_pretrained(_snake_case )
__lowercase : str = RagRetriever.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
__lowercase : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowercase : Union[str, Any] = retriever.retrieve(_snake_case , n_docs=1 )
self.assertTrue(out is not None )
def snake_case_ ( self : str ):
__lowercase : List[str] = 1
__lowercase : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
__lowercase : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowercase , __lowercase , __lowercase : Tuple = retriever.retrieve(_snake_case , n_docs=_snake_case )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_snake_case ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _snake_case )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def snake_case_ ( self : Any ):
__lowercase : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_snake_case )
__lowercase : Optional[Any] = RagRetriever.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
__lowercase : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowercase : List[Any] = retriever.retrieve(_snake_case , n_docs=1 )
self.assertTrue(out is not None )
def snake_case_ ( self : List[Any] ):
__lowercase : Any = 1
__lowercase : List[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
__lowercase : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowercase , __lowercase , __lowercase : Union[str, Any] = retriever.retrieve(_snake_case , n_docs=_snake_case )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_snake_case ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _snake_case )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def snake_case_ ( self : Any ):
__lowercase : str = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_snake_case )
__lowercase : Optional[int] = RagRetriever.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
__lowercase : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowercase : Tuple = retriever.retrieve(_snake_case , n_docs=1 )
self.assertTrue(out is not None )
def snake_case_ ( self : Tuple ):
__lowercase : Optional[int] = 1
__lowercase : str = self.get_dummy_legacy_index_retriever()
__lowercase : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowercase , __lowercase , __lowercase : Tuple = retriever.retrieve(_snake_case , n_docs=_snake_case )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_snake_case ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , _snake_case )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def snake_case_ ( self : Union[str, Any] ):
__lowercase : Tuple = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_snake_case )
__lowercase : Tuple = RagRetriever.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
__lowercase : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowercase : Tuple = retriever.retrieve(_snake_case , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def snake_case_ ( self : Optional[Any] ):
    """Call the retriever end-to-end (canonical HF index) and check numpy vs. ``pt`` output types.

    NOTE(review): obfuscation residue -- ``_snake_case`` arguments are undefined;
    code preserved byte-for-byte.
    """
    import torch

    __lowercase : Tuple = 1
    __lowercase : Any = self.get_dummy_canonical_hf_index_retriever()
    # Two dummy "question" token-id sequences.
    __lowercase : str = [[5, 7], [10, 11]]
    __lowercase : List[Any] = np.array(
        [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
    __lowercase : Any = retriever(_snake_case , _snake_case , prefix=retriever.config.generator.prefix , n_docs=_snake_case )
    __lowercase , __lowercase , __lowercase : Tuple = (
        out['''context_input_ids'''],
        out['''context_attention_mask'''],
        out['''retrieved_doc_embeds'''],
    )
    self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
    self.assertIsInstance(_snake_case , _snake_case )
    self.assertIsInstance(_snake_case , _snake_case )
    self.assertIsInstance(_snake_case , np.ndarray )
    # Same call, but requesting PyTorch tensors back.
    __lowercase : Optional[Any] = retriever(
        _snake_case , _snake_case , prefix=retriever.config.generator.prefix , n_docs=_snake_case , return_tensors='''pt''' , )
    __lowercase , __lowercase , __lowercase , __lowercase : Optional[Any] = ( # noqa: F841
        out['''context_input_ids'''],
        out['''context_attention_mask'''],
        out['''retrieved_doc_embeds'''],
        out['''doc_ids'''],
    )
    self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
    self.assertIsInstance(_snake_case , torch.Tensor )
    self.assertIsInstance(_snake_case , torch.Tensor )
    self.assertIsInstance(_snake_case , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def snake_case_ ( self : List[Any] ):
    """When a ctx-encoder tokenizer is attached, __call__ must also return tokenized docs.

    NOTE(review): obfuscation residue -- ``_snake_case`` arguments are undefined;
    code preserved byte-for-byte.
    """
    __lowercase : Tuple = self.get_dpr_ctx_encoder_tokenizer()
    __lowercase : str = 1
    __lowercase : int = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
    retriever.set_ctx_encoder_tokenizer(_snake_case )
    __lowercase : Tuple = [[5, 7], [10, 11]]
    __lowercase : int = np.array(
        [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
    __lowercase : Any = retriever(_snake_case , _snake_case , prefix=retriever.config.generator.prefix , n_docs=_snake_case )
    self.assertEqual(
        len(_snake_case ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
    self.assertEqual(
        all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , _snake_case ) # check for doc token related keys in dictionary.
| 284
| 1
|
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class snake_case__ ( SchedulerCommonTest ):
    """Unit tests for ``KDPMaDiscreteScheduler``.

    NOTE(review): the obfuscated source inherited from an undefined
    ``__SCREAMING_SNAKE_CASE``; ``SchedulerCommonTest`` (imported above) is the
    base that provides ``check_over_configs``/``dummy_model``/``dummy_sample_deter``
    used below.  Class attributes and method names were all bound to a single
    obfuscated name (each def clobbering the previous); they are restored to the
    names the bodies actually reference (``scheduler_classes``,
    ``num_inference_steps``, ``get_scheduler_config``, ``test_*``).
    """

    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler kwargs, overridable via ``**kwargs``."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        """Full denoising loop with v-prediction; pins the expected output statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            scaled_sample = scheduler.scale_model_input(sample, t)
            model_output = model(scaled_sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        """Full denoising loop with the default (epsilon) prediction type."""
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            scaled_sample = scheduler.scale_model_input(sample, t)
            model_output = model(scaled_sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        """Same loop, but with timesteps placed directly on the target device."""
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            scaled_sample = scheduler.scale_model_input(sample, t)
            model_output = model(scaled_sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 638
|
"""Lazy-import structure for the PoolFormer model (standard transformers
``__init__.py`` pattern).

NOTE(review): the obfuscated source bound everything to ``lowercase__`` while the
final line referenced the undefined ``_import_structure``, and the ``_LazyModule``
was never installed into ``sys.modules``; both are restored here.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Map of submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Direct imports for type checkers only; at runtime the lazy module handles them.
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 638
| 1
|
from __future__ import annotations
import os
from collections.abc import Mapping
lowercase_ = tuple[int, int]
class SCREAMING_SNAKE_CASE :
    """A weighted undirected graph for Project Euler 107.

    Edges are stored keyed by ``(min(u, v), max(u, v))`` so each undirected
    edge has exactly one canonical key.

    NOTE(review): the obfuscated source named both methods identically (the
    second def clobbered the first) while callers use ``add_edge`` and
    ``prims_algorithm``; the real names are restored.
    """

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices = vertices
        # Canonicalise every edge key as (low, high).
        self.edges = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add an edge; both endpoints are added to the vertex set."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> SCREAMING_SNAKE_CASE:
        """Return a minimum spanning tree of this graph built with Prim's algorithm."""
        # Start from an arbitrary (the smallest) vertex.
        subgraph = SCREAMING_SNAKE_CASE({min(self.vertices)}, {})
        while len(subgraph.vertices) < len(self.vertices):
            # Sentinel larger than any real edge weight.
            min_weight = max(self.edges.values()) + 1
            min_edge = None
            for edge, weight in self.edges.items():
                # XOR: exactly one endpoint already in the tree -> candidate edge.
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    """Project Euler 107: maximum saving achievable by replacing the network
    with its minimum spanning tree.

    NOTE(review): the obfuscated source used the function's own (duplicated)
    name both as ``__file__`` and as several locals, and ``solution()`` was
    called in the ``__main__`` guard but never defined; names restored.
    """
    script_dir = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    with open(network_file) as f:
        data = f.read().strip().split('\n')
    adjaceny_matrix = [line.split(',') for line in data]
    # The matrix is symmetric: only the strict lower triangle is needed.
    for edgea in range(1, len(adjaceny_matrix)):
        for edgeb in range(edgea):
            if adjaceny_matrix[edgea][edgeb] != "-":
                edges[(edgeb, edgea)] = int(adjaceny_matrix[edgea][edgeb])
    graph = SCREAMING_SNAKE_CASE(set(range(len(adjaceny_matrix))), edges)
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
| 707
|
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the arc length of ``fnc`` over ``[x_start, x_end]``.

    The curve is approximated by ``steps`` straight segments whose lengths are
    summed with ``math.hypot``.

    NOTE(review): the obfuscated source declared four parameters with the same
    name (a SyntaxError) and the ``__main__`` guard calls ``line_length``;
    the real parameter and function names are restored.
    """
    xa = x_start
    fxa = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next)
        length += math.hypot(xa_next - xa, fxa_next - fxa)
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return length
if __name__ == "__main__":

    def f(x: float) -> float:
        """Example curve: f(x) = sin(10x)."""
        # NOTE(review): the obfuscated def had an unbound `x` and a bogus `-> str`.
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    # Show convergence as the step count grows by powers of ten.
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
| 45
| 0
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for the pickled-pandas loader.

    NOTE(review): renamed from the obfuscated ``_A`` -- the builder class below
    assigns ``PandasConfig``, which was otherwise undefined, and reads
    ``self.config.features``.
    """

    # Optional explicit feature schema; when None the schema is inferred.
    features: Optional[datasets.Features] = None
class _A( datasets.ArrowBasedBuilder ):
    """Arrow-based builder that yields tables from pickled pandas DataFrames.

    NOTE(review): identifier obfuscation bound every local to ``__A`` (each
    assignment clobbers the previous) and references ``PandasConfig``, which is
    undefined in this file as written; code preserved byte-for-byte.
    """

    # Builder-config class (see NOTE above about the undefined name).
    UpperCamelCase : Optional[Any] = PandasConfig

    def UpperCAmelCase_ ( self ):
        # Dataset metadata: only the (optional) explicit feature schema.
        return datasets.DatasetInfo(features=self.config.features )

    def UpperCAmelCase_ ( self , _A ):
        """Build split generators from ``data_files`` (single split or dict of splits)."""
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        __A : List[str] = dl_manager.download_and_extract(self.config.data_files )
        # Single-split shortcut: a plain path or list of paths becomes the TRAIN split.
        if isinstance(_A , (str, list, tuple) ):
            __A : Optional[int] = data_files
            if isinstance(_A , _A ):
                __A : Tuple = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            __A : Optional[int] = [dl_manager.iter_files(_A ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
        # Dict form: one generator per named split.
        __A : Dict = []
        for split_name, files in data_files.items():
            if isinstance(_A , _A ):
                __A : Tuple = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            __A : Union[str, Any] = [dl_manager.iter_files(_A ) for file in files]
            splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'files': files} ) )
        return splits

    def UpperCAmelCase_ ( self , _A ):
        """Cast an Arrow table to the configured features, if any were given."""
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            __A : Optional[int] = table_cast(_A , self.config.features.arrow_schema )
        return pa_table

    def UpperCAmelCase_ ( self , _A ):
        """Yield ``(key, table)`` pairs, one per pickled DataFrame file."""
        for i, file in enumerate(itertools.chain.from_iterable(_A ) ):
            with open(_A , 'rb' ) as f:
                __A : List[str] = pa.Table.from_pandas(pd.read_pickle(_A ) )
            yield i, self._cast_table(_A )
| 239
|
from ..utils import DummyObject, requires_backends
class _A( metaclass=DummyObject ):
    """Placeholder object that raises a helpful error when torch/scipy are missing.

    NOTE(review): the obfuscated source used an undefined metaclass name, two
    identically-named classmethods (the second clobbered the first) and
    duplicate ``*_A, **_A`` parameters (a SyntaxError); restored to the
    standard dummy-object pattern (``DummyObject`` is imported above).
    """

    # Backends required before the real class can be used.
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 239
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( PipelineTesterMixin , unittest.TestCase ):
    """Fast (CPU, tiny-model) tests for ``KandinskyVaaInpaintPipeline``.

    NOTE(review): the obfuscated source inherited from an undefined
    ``_SCREAMING_SNAKE_CASE`` (``PipelineTesterMixin`` is the import that fits),
    bound every class attribute to ``_A`` and every method to ``lowerCamelCase``
    (clobbering each other), and declared duplicate parameters in
    ``get_dummy_inputs`` (a SyntaxError).  Names restored to the
    PipelineTesterMixin contract the bodies actually reference.
    """

    pipeline_class = KandinskyVaaInpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        """A tiny, deterministic UNet suitable for fast CPU tests."""
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        return UNetaDConditionModel(**model_kwargs)

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        return VQModel(**self.dummy_movq_kwargs)

    def get_dummy_components(self):
        """Assemble the pipeline components (unet, scheduler, movq)."""
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        return {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline inputs (embeddings, init image, mask, generator)."""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        # NOTE(review): the masked-out region was lost in obfuscation; the
        # conventional fast-test choice is the top-left quadrant -- confirm upstream.
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }

    def test_kandinsky_inpaint(self):
        """Run the pipeline on CPU and pin a corner slice of the output image."""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"""image.shape {image.shape}""")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyVaaInpaintPipelineIntegrationTests ( unittest.TestCase ):
    """Slow GPU integration test against the real Kandinsky 2.2 checkpoints.

    NOTE(review): renamed from the obfuscated ``SCREAMING_SNAKE_CASE``, which
    collided with (and silently replaced) the fast-test class above; the two
    identically-named methods are restored to ``tearDown``/``test_kandinsky_inpaint``.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        # NOTE(review): the masked region was lost in obfuscation (`A_ = 0`);
        # this band matches the "hat" area -- confirm against the upstream test.
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()
        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 480
|
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
_lowerCAmelCase = logging.getLogger(__name__)
def git_log(folder_path: str):
    """Dump the current git repo's id, commit sha and branch to
    ``<folder_path>/git_log.json``.

    NOTE(review): the obfuscated source defined three functions under one name
    (each clobbering the previous) and bound every local to ``A_``; the
    conventional name and locals the body reads are restored.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """Handle single-node / multi-node / multi-GPU setup, filling the derived
    distributed-training attributes on ``params`` in place.

    NOTE(review): the obfuscated source bound every assignment to the local
    ``A_`` while the rest of the function reads ``params.*`` attributes; the
    attribute writes the reads require are restored.
    """
    if params.n_gpu <= 0:
        # CPU-only run: no distributed setup.
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def set_seed(args):
    """Seed numpy, torch and (when GPUs are used) CUDA RNGs from ``args.seed``
    for reproducibility.

    NOTE(review): renamed from the third duplicate ``__UpperCamelCase`` def and
    the parameter restored to ``args``, which the body reads.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
| 480
| 1
|
"""simple docstring"""
def least_divisible_repunit(divisor: int) -> int:
    """Return A(divisor): the number of digits of the smallest repunit
    (1, 11, 111, ...) divisible by ``divisor``; 0 when no repunit qualifies
    (i.e. when ``divisor`` shares a factor with 10).

    NOTE(review): both functions in the obfuscated source shared one name and
    all locals were bound to ``_lowerCamelCase``; the names the bodies and the
    ``__main__`` guard reference are restored.
    """
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1  # current repunit modulo divisor
    repunit_index = 1
    while repunit:
        # Append a digit '1': R(k+1) = 10*R(k) + 1 (mod divisor).
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    """Project Euler 129: least odd n (coprime to 10) with A(n) > ``limit``."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    # Only odd candidates can have a repunit multiple.
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f'''{solution() = }''')
| 88
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
# Sample inputs for the demo in the __main__ guard, which reads these names.
# NOTE(review): both tuples were bound to the same obfuscated name.
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    """Singly-linked node.

    NOTE(review): renamed from the obfuscated ``_lowerCamelCase``, which
    collided with the list class below that constructs ``Node`` instances.
    """

    data: int
    next_node: Node | None


class SortedLinkedList:
    """Singly linked list that stores its integers in ascending order."""

    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Insert at the head in descending order so traversal is ascending.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a new sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Short alias used by the demo below (was bound to an obfuscated name).
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 285
| 0
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    # UnCLIP pipelines require torch and transformers >= 4.25.0.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to dummy placeholders that raise a helpful error on use.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 395
|
"""simple docstring"""
def A_ ( __lowercase = 10 ):
    """Return (as a string) the last ``__lowercase`` digits of the non-Mersenne
    prime 28433 * 2**7830457 + 1 (Project Euler 97).

    Raises:
        ValueError: if ``__lowercase`` is not a non-negative integer.
    """
    # NOTE(review): the obfuscated guard was `isinstance(__lowercase, __lowercase)`
    # with an undefined `n`; restored to validate the single parameter.
    if not isinstance(__lowercase, int) or __lowercase < 0:
        raise ValueError('Invalid input')
    modulus = 10**__lowercase
    # Three-argument pow keeps the enormous exponentiation bounded by the modulus.
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(10) = }""")
| 395
| 1
|
'''simple docstring'''
from __future__ import annotations
def _ceil_index(v, left, right, key):
    """Binary search helper: smallest index in ``v[left+1 .. right]`` with
    ``v[idx] >= key``.

    NOTE(review): both defs in the obfuscated source shared one name, so the
    main function could not call this helper; it is given a distinct name.
    """
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right


def lowerCamelCase__ ( A ):
    """Length of the longest strictly increasing subsequence of ``A`` in
    O(n log n) using the patience-sorting "tails" technique."""
    if len(A) == 0:
        return 0
    tail = [0] * len(A)
    length = 1
    tail[0] = A[0]
    for i in range(1, len(A)):
        if A[i] < tail[0]:
            # New smallest value: it starts a fresh candidate subsequence.
            tail[0] = A[i]
        elif A[i] > tail[length - 1]:
            # Extends the longest subsequence found so far.
            tail[length] = A[i]
            length += 1
        else:
            # Replace the ceiling element to keep subsequence tails minimal.
            tail[_ceil_index(tail, -1, length - 1, A[i])] = A[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 210
|
from importlib import import_module
from .logging import get_logger
UpperCAmelCase : Union[str, Any] = get_logger(__name__)
class _A:
"""simple docstring"""
def __init__( self , _A , _A=None ):
__A : Union[str, Any] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('__' ):
setattr(self , _A , getattr(_A , _A ) )
__A : Optional[int] = module._original_module if isinstance(_A , _PatchedModuleObj ) else module
class _A:
    """Context manager that patches ``obj.<target>`` (e.g. ``os.path.join``)
    with ``new``, restoring the originals on exit.

    NOTE(review): identifier obfuscation erased the ``self.*`` assignments in
    ``__init__``, the ``self.original[...]`` bookkeeping writes, and gave the
    class attribute and the start/stop methods clashing names; all are restored
    to the names the surviving code reads (``_active_patches``, ``self.obj``,
    ``self.original`` etc.).
    """

    # Stack of patches activated via start(), shared across instances.
    _active_patches = []

    def __init__(self, obj, target, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split('.')[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split('.')

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module('.'.join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    (isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule)
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module('.'.join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it'a s builtin like "open"
            self.original[target_attr] = globals()['__builtins__'][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""")

    def __exit__(self, *exc_info):
        # Restore everything recorded during __enter__.
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate the patch without a `with` block; pair with stop()."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Deactivate a patch previously activated with start()."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
| 239
| 0
|
"""Download the og:image of a web page to a timestamped .jpg file."""
from datetime import datetime

import requests
from bs4 import BeautifulSoup  # fixed: was misspelled `bsa`, which cannot be imported

if __name__ == "__main__":
    url = input('''Enter image url: ''').strip()
    print(f'Downloading image from {url} ...')
    soup = BeautifulSoup(requests.get(url).content, '''html.parser''')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
    image_data = requests.get(image_url).content
    file_name = f'{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'
    with open(file_name, '''wb''') as fp:
        fp.write(image_data)
    print(f'Done. Image saved to disk as {file_name}.')
| 452
|
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
# Load the iris dataset and split it into train/test sets for the KNN demo below.
# Bug fix: every assignment previously bound the same throwaway name, leaving
# `data`, `X`, `y`, `classes`, `X_train` and `y_train` (all read later) undefined.
data = datasets.load_iris()
X = np.array(data["data"])  # feature matrix, shape (150, 4)
y = np.array(data["target"])  # integer class labels
classes = data["target_names"]  # human-readable class names
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """Return the Euclidean (L2) distance between two points.

    Bug fix: both parameters previously shared one name, which is a
    SyntaxError in Python; the function is renamed to match the
    ``euclidean_distance`` call site inside ``classifier``.

    a, b: array-likes of equal length (coordinates of the two points).
    """
    return np.linalg.norm(np.array(a) - np.array(b))
def classifier(train_data, train_target, classes, point, k=5):
    """Classify ``point`` with a k-nearest-neighbours vote over the training set.

    Bug fix: all five parameters previously shared one name (a SyntaxError);
    the function is renamed to match the ``classifier(...)`` call in __main__.

    train_data: iterable of training feature vectors.
    train_target: iterable of integer labels aligned with ``train_data``.
    classes: sequence mapping a label index to its display name.
    point: the feature vector to classify.
    k: number of neighbours to vote (default 5).
    Returns the class name for the majority label among the k nearest points.
    """
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 452
| 1
|
# First cell injected into auto-generated documentation notebooks.
# The Korean text inside the literal is user-facing notebook content (install
# instructions) and must not be altered here.
# Bug fix: the three constants were all bound to one throwaway name, leaving
# INSTALL_CONTENT (referenced below) undefined.
INSTALL_CONTENT = '''
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
'''

# The list of cells prepended to every generated notebook.
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]

# Placeholders the doc builder must not reformat with black.
black_avoid_patterns = {
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    """Config tester checking MobileViT-specific config attributes.

    Fixes: the base class name was undefined (restored to the imported
    ``ConfigTester``) and the method body referenced an undefined local.
    """

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    """Builds a tiny MobileViT config plus dummy inputs for the model tests below.

    Fixes: ``self.`` had been stripped from every constructor assignment (the
    other methods read these values via ``self``), all methods shared one name
    (shadowing each other), and tuple unpackings were collapsed. The class is
    renamed to match the ``MobileViTModelTester(self)`` reference in the test
    class's ``setUp``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        use_labels=True,
        is_training=True,
        num_labels=10,
        scope=None,
    ):
        # Everything is stored on the instance; the methods below read it via self.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels, pixel_labels) dummy tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        # Logits are downsampled by output_stride relative to the input image.
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for MobileViT.

    Fixes: the mixin base-class names were undefined (restored to the imported
    ``ModelTesterMixin``/``PipelineTesterMixin``), class attributes and methods
    all shared single identifiers (shadowing), and ``self.model_tester`` /
    ``self.config_tester`` were never assigned in ``setUp``.
    """

    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests.

    Bug fix: the opened image was bound to a throwaway name while the function
    returned an undefined ``image``; the function is also renamed to match the
    ``prepare_img()`` call sites in the integration tests.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against released MobileViT checkpoints.

    Fixes: every local had been bound to one throwaway name, leaving
    ``model``, ``inputs``, ``outputs`` etc. undefined where they were read.
    """

    @cached_property
    def default_image_processor(self):
        # Only construct the processor when the vision extras are installed.
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        # Resizing to an explicit target size, then to the model's native size.
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 582
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    """Builds a tiny RobertaPreLayerNorm config plus dummy inputs for the Flax tests.

    Fixes: ``self.`` had been stripped from every constructor assignment, the
    tuple unpackings were collapsed, and the class name is restored to match
    the reference in the model test's ``setUp``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common Flax model tests for RobertaPreLayerNorm.

    Fixes: undefined mixin base name (restored to the imported
    ``FlaxModelTesterMixin``), shadowed class attributes, an unset
    ``self.model_tester``, and undefined keyword values in the slow test.
    """

    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            # Weights are only published as a PyTorch checkpoint, hence from_pt.
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    """Slow integration tests checking outputs against reference slices.

    Fixes: locals were all bound to throwaway names, the int dtype had been
    garbled (restored to ``jnp.int32``), and ``from_pt`` lost its value.
    """

    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 718
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for CTRL's BPE tokenizer.

    Fixes: undefined mixin base name (restored to the imported
    ``TokenizerTesterMixin``), shadowed class attributes, and
    ``self.vocab_file``/``self.merges_file``/``self.special_tokens_map``
    never being assigned in ``setUp`` although the other methods read them.
    """

    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 628
| 0
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value of a complete binary minimax game tree.

    Bug fix: all five parameters previously shared one name (a SyntaxError),
    and the alternation of the maximizing player was lost; the function is
    renamed to match the ``minimax(...)`` call in ``main``.

    depth: current depth in the tree (root is 0).
    node_index: index of the current node within its level.
    is_max: True when the current player is the maximizer.
    scores: leaf values of the game tree (length must be a power of two).
    height: depth of the leaves, i.e. log2(len(scores)).

    Raises ValueError for a negative depth or an empty score list.
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    # Players alternate: the maximizer's children are minimizing nodes and
    # vice versa.
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )
def main() -> None:
    """Demo entry point: print the optimal minimax value for a sample tree.

    Renamed from a duplicated identifier so the ``main()`` call in the
    ``__main__`` guard resolves, and the lost ``minimax`` arguments restored.
    """
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"""Optimal value : {minimax(0, 0, True, scores, height)}""")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 197
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import machinery for the M-CTC-T package: submodules are imported only
# on first attribute access via _LazyModule.
# Bug fix: the structure dict, the torch-only model list and the _LazyModule
# were all bound to throwaway names, so `_import_structure` (read below) was
# undefined and the lazy module was never installed in sys.modules.
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # The modeling objects require torch; expose them only when it is installed.
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    # Replace this module with the lazy proxy so attribute access triggers
    # on-demand submodule imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 197
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Bug fix: these module-level constants were all bound to one throwaway name,
# while the tokenizer class below reads them as VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES,
# SPIECE_UNDERLINE and logger.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

# Maximum input lengths (in tokens) per released checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

# SentencePiece's meta-symbol marking a word boundary.
SPIECE_UNDERLINE = "▁"
class CamembertTokenizer(PreTrainedTokenizer):
    """CamemBERT tokenizer, backed by a SentencePiece BPE model.

    A handful of ids (<s>NOTUSED, <pad>, </s>NOTUSED, <unk>, <mask>) come from
    the original fairseq checkpoint and are mapped ahead of the SentencePiece
    vocabulary.

    Fixes: undefined base class (restored to the imported
    ``PreTrainedTokenizer``), ``self.`` stripped from the constructor
    assignments that every other method reads, duplicate parameter names
    (a SyntaxError), and all methods sharing one identifier (shadowing).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],  # noqa: B006 — kept mutable for interface compatibility; treated as read-only
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add special tokens: ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return all-zero token type ids: CamemBERT does not use them."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        # fairseq special ids are prepended before the SentencePiece pieces.
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an index (int) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join tokens back into a single string, keeping special tokens intact."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        # The SentencePieceProcessor is not picklable; it is reloaded in __setstate__.
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model file into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 703
|
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class snake_case(TransformedDistribution):
    """A distribution transformed by the affine map ``y = loc + scale * x``."""

    def __init__(self, base_distribution, loc=None, scale=None, event_dim=0) -> None:
        # Fix: the original signature repeated one parameter name four times
        # (a SyntaxError) and extended an undefined base-class name; this is an
        # affine-transformed wrapper over `base_distribution`.
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the transformed distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the transformed distribution."""
        return self.variance.sqrt()
class snake_case(nn.Module):
    """Projects a hidden vector to one tensor per distribution argument."""

    def __init__(self, in_features, args_dim, domain_map, **kwargs) -> None:
        # Fix: the original signature repeated one parameter name (SyntaxError)
        # and bound locals instead of module attributes.
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x) -> Tuple[torch.Tensor]:
        """Apply each linear head to `x` and map results into the parameter domain.

        Renamed to `forward` so nn.Module's `__call__` dispatches to it.
        """
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)
class snake_case(nn.Module):
    """Wraps a plain callable so it can be used as an nn.Module layer."""

    def __init__(self, function) -> None:
        super().__init__()
        # Fix: the original bound a local instead of `self.function`.
        self.function = function

    def forward(self, x, *args):
        """Apply the wrapped callable.

        Renamed to `forward` so nn.Module's `__call__` dispatches to it; the
        original also repeated the parameter name (SyntaxError).
        """
        return self.function(x, *args)
class snake_case:
    """Base class that converts raw network outputs into a torch Distribution."""

    # Contract attributes subclasses must provide (the original collapsed all
    # three annotations into one placeholder name).
    distribution_class: type
    in_support: Distribution
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        # Fix: the original bound locals, leaving `self.dim`/`self.args_dim` unset.
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        """Instantiate the underlying distribution (Independent over `dim` if > 1)."""
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc=None, scale=None) -> Distribution:
        """Build the output distribution, optionally rescaled by (loc, scale)."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            # NOTE(review): `AffineTransformed` is expected to be the affine
            # wrapper class defined earlier in this module.
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event sampled from the distribution."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions (length of `event_shape`)."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value guaranteed to lie inside the distribution's support."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the projection layer mapping `in_features` to the distribution args."""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args):
        """Map raw projections into each parameter's valid domain; subclass hook."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Smooth positivity mapping: (x + sqrt(x^2 + 4)) / 2."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class snake_case(snake_case):
    """Distribution output for a Student-T.

    Inherits the distribution-output base class bound to `snake_case` just
    above (the original extended an undefined placeholder name).
    """

    args_dim = {"df": 1, "loc": 1, "scale": 1}
    distribution_class = StudentT

    @classmethod
    def domain_map(cls, df, loc, scale):
        """Constrain raw projections: scale > 0 and df > 2."""
        # Fix: the original repeated one parameter name three times (SyntaxError).
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class snake_case(snake_case):
    """Distribution output for a Normal.

    Inherits the distribution-output base class bound to `snake_case` above
    (the original extended an undefined placeholder name).
    """

    args_dim = {"loc": 1, "scale": 1}
    distribution_class = Normal

    @classmethod
    def domain_map(cls, loc, scale):
        """Constrain raw projections: scale > 0."""
        # Fix: the original repeated one parameter name (SyntaxError).
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class snake_case(snake_case):
    """Distribution output for a NegativeBinomial.

    Inherits the distribution-output base class bound to `snake_case` above
    (the original extended an undefined placeholder name).
    """

    args_dim = {"total_count": 1, "logits": 1}
    distribution_class = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count, logits):
        """Constrain raw projections: total_count > 0; logits unconstrained."""
        # Fix: the original repeated one parameter name (SyntaxError).
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        # Fix: the original unpacked both tuple elements into the same name.
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # Overwrites the parent method: scale the logits rather than wrap the
    # distribution in an affine transform.
    def distribution(self, distr_args, loc=None, scale=None) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
| 694
| 0
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    """Builds tiny ESM configs/inputs and runs shape checks for the TF test suite.

    Fix: restored the class name (the `setUp` below instantiates
    `TFEsmModelTester`), real attribute bindings (the original bound every
    value to a throwaway local, leaving all `self.*` unset), real method names
    (the test-case class calls them by name), and distinct parameter names
    (the originals repeated one name six times — a SyntaxError).
    """

    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        """Create a tiny EsmConfig plus random inputs/labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        """Like `prepare_config_and_inputs`, plus decoder/encoder cross-attention inputs."""
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Run the base model via dict, list and tensor inputs; check output shape."""
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Run the model as a decoder with and without encoder outputs."""
        config.add_cross_attention = True

        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the MLM head's logits shape."""
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the token-classification head's logits shape."""
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) for the shared mixin tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class __magic_name__(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """TF ESM model test suite.

    Fix: restored the mixin base classes (the original extended an undefined
    placeholder name), the distinct class attributes the mixins read (the
    original rebound one name repeatedly), `setUp`/`test_*` method names so
    unittest actually discovers them, and the local bindings inside each test.
    """

    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class __magic_name__(unittest.TestCase):
    """Slow integration checks against the facebook/esm2_t6_8M_UR50D checkpoint.

    Fix: restored `test_*` method names so unittest discovers them, and the
    local bindings (`model`, `input_ids`, `output`, ...) the assertions read —
    the original bound results to throwaway locals and then referenced
    undefined names.
    """

    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 271
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __magic_name__(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for StableDiffusionSAGPipeline with tiny components.

    Fix: restored the mixin base classes (the original extended an undefined
    placeholder), the distinct class attributes the mixins read, and the local
    bindings inside `get_dummy_components` — the original bound each component
    to a throwaway local and then referenced undefined names in the dict.
    """

    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build minimal unet/scheduler/vae/text-encoder components for fast tests."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            # Fix: these flags were bound to an undefined placeholder name.
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline kwargs for the given device/seed."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class __magic_name__(unittest.TestCase):
    """Slow GPU integration tests for the SAG pipelines.

    Fix: restored `test_*` method names so unittest discovers them, and the
    local bindings (`sag_pipe`, `prompt`, `generator`, `output`, ...) — the
    original bound results to throwaway locals and then referenced undefined
    names (`sag_pipe`, `_lowercase`, ...).
    """

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )

        image = output.images

        assert image.shape == (1, 512, 768, 3)
| 271
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ : Optional[int] = logging.get_logger(__name__)
__magic_name__ : Optional[Any] = {
'andreasmadsen/efficient_mlm_m0.40': (
'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
),
}
class lowerCamelCase(PretrainedConfig):
    """Configuration class for RoBERTa-PreLayerNorm models."""

    # Fix: `model_type` is the attribute name PretrainedConfig relies on; the
    # original bound the value to a placeholder name. The base class name was
    # also an undefined placeholder.
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Fix: the original signature repeated one parameter name for every
        # argument (a SyntaxError) and bound locals instead of attributes.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowerCamelCase(OnnxConfig):
    """ONNX export configuration for RoBERTa-PreLayerNorm."""

    @property
    def inputs(self):
        """Dynamic-axis spec for the model inputs.

        Fix: restored the `inputs` property name the OnnxConfig API requires,
        the base-class name (an undefined placeholder), and the `dynamic_axis`
        binding the return statement reads (the original bound a placeholder
        local and then referenced `dynamic_axis`).
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 701
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class lowerCamelCase(TaskTemplate):
    """Task template aligning a dataset's text/label columns for text classification."""

    # Fix: the original bound every class attribute to one placeholder name
    # (so only the last survived) and froze the dataclass with an undefined
    # placeholder; restored the attribute names the methods below read.
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses `features`' ClassLabel.

        Raises ValueError if the label column is missing or not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        # Fix: the original tested isinstance against a placeholder instead of ClassLabel.
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Direct __dict__ write because the dataclass is frozen.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self):
        """Mapping from dataset column names to the canonical text/labels names."""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
| 608
| 0
|
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(a):
    """Pick a parquet row-group batch size small enough for large binary features.

    Returns None when no Image/Audio/binary feature is present (use the default).
    Renamed from a placeholder: the writer class below calls
    `get_writer_batch_size(dataset.features)`.
    """
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        # Fix: the original tested `isinstance(x, x)` (a TypeError at runtime)
        # instead of checking the feature against Image / Audio / binary Value.
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(a, set_batch_size)

    return None if batch_size is np.inf else batch_size
class a_(AbstractDatasetReader):
    """Dataset reader backed by the packaged `parquet` builder.

    Fix: restored the base-class name (an undefined placeholder), distinct
    parameter names (the original repeated one name — a SyntaxError), the
    local bindings the builder call reads, and the abstract `read` method name.
    """

    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        """Materialize the dataset (streaming or map-style)."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class a_:
    """Writes a Dataset to a parquet file path or binary file object."""

    def __init__(self, dataset, path_or_buf, batch_size=None, **parquet_writer_kwargs):
        # Fix: the original signature repeated one parameter name (SyntaxError).
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        """Write the dataset; returns the number of bytes written."""
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj, batch_size, **parquet_writer_kwargs) -> int:
        """Stream the dataset into `file_obj` in `batch_size`-row chunks."""
        written = 0
        # `path_or_buf` must not be forwarded to pq.ParquetWriter.
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
| 356
|
'''simple docstring'''
def topological_sort(graph):
    """Kahn's algorithm: print a topological order of `graph`, or report a cycle.

    `graph` maps each vertex (0..n-1) to a list of its successors.
    Fix: restored the function/parameter names the body and the call site
    below already use, and the loop bindings — the original appended the whole
    graph instead of vertex indices and popped into an unused name.
    """
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
# Fix: the sort call referenced `graph`, but the literal was bound to a
# placeholder name; bind it under the name the call uses.
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 356
| 1
|
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __magic_name__(PipelineTool):
    """Tool producing a binary segmentation mask of an image for a text label.

    Fix: restored the base class (an undefined placeholder), the distinct
    attribute names the PipelineTool API reads (the original rebound one
    name repeatedly), and the encode/forward/decode method names it invokes.
    """

    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        """Preprocess the (image, label) pair into model tensors."""
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        """Run the segmentation model without tracking gradients."""
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        """Threshold the logits into a black/white PIL mask."""
        array = outputs.cpu().detach().numpy()
        # Fix: the original overwrote placeholder locals with 0 and 1 instead of
        # thresholding the array, and `np.uinta` is not a NumPy dtype (uint8).
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
| 713
|
from math import factorial, radians
def _a ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int = 18 , SCREAMING_SNAKE_CASE : int = 10 ):
"""simple docstring"""
UpperCamelCase__ : Dict = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
UpperCamelCase__ : int = radians(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = angle_in_radians
UpperCamelCase__ : str = 3
UpperCamelCase__ : int = -1
for _ in range(SCREAMING_SNAKE_CASE ):
result += (b * (angle_in_radians**a)) / factorial(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__import__("doctest").testmod()
| 106
| 0
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
@dataclass
class __lowerCAmelCase ( BenchmarkArguments ):
    """TensorFlow-specific benchmark arguments.

    Extends ``BenchmarkArguments`` with TPU/XLA options and lazily builds
    ``tf.distribute`` strategies. The previous version had every attribute and
    property collapsed to placeholder names, so the internal references
    (``self.tpu_name``, ``self._setup_tpu`` etc.) were all undefined.
    """

    # Deprecated boolean CLI flags; each `no_xxx` is translated to its
    # positive counterpart in __init__.
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Translate deprecated ``no_xxx`` kwargs into their positive form, then
        forward everything to ``BenchmarkArguments``."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                # Invert the value: `no_speed=True` means `speed=False`.
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        """Resolve a TPU cluster if requested and reachable, else None."""
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        """Build the tf.distribute strategy for TPU, single GPU, or CPU."""
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self):
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self):
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 9
|
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    """Config sanity checks specific to ``SegformerConfig``.

    ``ConfigTester.run_common_tests`` discovers this method by name, and the
    test class below instantiates ``SegformerConfigTester`` — both names were
    destroyed by placeholder renaming.
    """

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'num_attention_heads'))
        self.parent.assertTrue(hasattr(config, 'num_encoder_blocks'))
class SegformerModelTester:
    """Builds tiny Segformer configs/inputs and runs shape checks for the tests
    below. The previous version repeated one parameter name (SyntaxError) and
    collapsed all helper methods to ``_a`` while the test class calls them by
    their real names."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one tiny batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Backbone forward pass: check the last hidden state shape."""
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        """Semantic segmentation head: check logits shape and positive loss."""
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        """Single-label (binary) segmentation: loss must still be positive."""
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common ModelTester-driven tests for the Segformer model family.

    Restores the real test/attribute names: the previous version had duplicate
    parameter names (SyntaxError), every method named ``_a`` (so only the last
    survived), and undefined placeholder locals.
    """

    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip('SegFormer does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            # one attention tensor per transformer layer across all encoder blocks
            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # bare backbone models have no loss to train against
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
self.assertIsNotNone(_snake_case )
def prepare_img():
    """Load the COCO cats fixture image used by the integration tests below
    (which call this helper by the name ``prepare_img``)."""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against pretrained Segformer checkpoints.

    Restores the method names (all three were ``_a`` and shadowed each other),
    the boolean image-processor kwargs, and the lost assignment targets.
    """

    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
            torch_device
        )
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            'nvidia/segformer-b1-finetuned-cityscapes-1024-1024'
        ).to(torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        # looser tolerance: b1 checkpoint logits differ more across hardware
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1E-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
            torch_device
        )
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        outputs.logits = outputs.logits.detach().cpu()
        # with target size: segmentation map is resized to (500, 300)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        # without target size: keeps the model's native (128, 128) resolution
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 9
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class __UpperCamelCase ( PipelineTool ):
    """Tool that summarizes an English text with a BART CNN/SamSum checkpoint.

    The previous version named all three protocol methods identically (so only
    the last survived) and passed parameter objects where boolean ``True``
    flags belong.
    """

    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        """Tokenize the input text, truncating to the model's maximum length."""
        return self.pre_processor(text, return_tensors='pt', truncation=True)

    def forward(self, inputs):
        """Generate the summary token ids; return the first (only) sequence."""
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        """Convert generated token ids back into a clean summary string."""
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
| 700
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
A__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for
    training and eval. Field names restored: main() reads them as
    ``data_args.dataset_name`` etc., and ``__post_init__`` assembles
    ``self.data_files`` from the train/validation folders."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        """Collect explicit train/validation folders into a data_files mapping
        (None when neither folder was given)."""
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image-processor we pretrain.
    Field names restored: main() reads them as ``model_args.model_name_or_path``
    etc."""

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=False, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    """TrainingArguments plus a base learning rate that main() scales by the
    total batch size (absolute_lr = base_lr * total_batch_size / 256)."""

    base_learning_rate: float = field(
        default=1E-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    """Stack per-example pixel tensors into one batch dict for the Trainer."""
    pixel_values = torch.stack([example['pixel_values'] for example in examples])
    return {"pixel_values": pixel_values}


def main():
    """Pretrain a ViTMAE model on an image dataset.

    Restored from the placeholder-mangled version: every assignment target was
    destroyed while the right-hand sides still referenced the real names
    (``training_args``, ``ds``, ``config``, ``trainer``, ...).
    """
    # See all possible arguments in src/transformers/training_args.py, or pass --help.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_mae', model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}'
    )
    logger.info(f'Training/evaluation parameters {training_args}')

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                'Use --overwrite_output_dir to overcome.'
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.'
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds['train'].train_test_split(data_args.train_val_split)
        ds['train'] = split['train']
        ds['validation'] = split['test']

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning('You are instantiating a new config instance from scratch.')
        if model_args.config_overrides is not None:
            logger.info(f'Overriding config: {model_args.config_overrides}')
            config.update_from_string(model_args.config_overrides)
            logger.info(f'New config: {config}')

    # adapt config
    config.update(
        {
            'mask_ratio': model_args.mask_ratio,
            'norm_pix_loss': model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool('.ckpt' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info('Training new model from scratch')
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds['train'].column_names
    else:
        column_names = ds['validation'].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = 'image'
    elif "img" in column_names:
        image_column_name = 'img'
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size['shortest_edge']
    else:
        size = (image_processor.size['height'], image_processor.size['width'])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert('RGB') if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Apply the MAE train transforms to every image in a batch."""
        examples['pixel_values'] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError('--do_train requires a train dataset')
        if data_args.max_train_samples is not None:
            ds['train'] = ds['train'].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError('--do_eval requires a validation dataset')
        if data_args.max_eval_samples is not None:
            ds['validation'] = (
                ds['validation'].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds['train'] if training_args.do_train else None,
        eval_dataset=ds['validation'] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics('train', train_result.metrics)
        trainer.save_metrics('train', train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        'tasks': 'masked-auto-encoding',
        'dataset': data_args.dataset_name,
        'tags': ['masked-auto-encoding'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    """Entry point for xla_spawn (TPUs); `index` is the process index."""
    main()


if __name__ == "__main__":
    main()
| 184
| 0
|
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ) -> bool:
    """Return ``True`` if the integer is "bouncy".

    A bouncy number's digits are neither entirely non-decreasing nor entirely
    non-increasing (Project Euler problem 112).

    Raises:
        ValueError: if the argument is not an ``int``.
    """
    if not isinstance(_lowerCAmelCase, _lowerCAmelCase.__class__ if False else int):
        raise ValueError("check_bouncy() accepts only integer arguments" )
    # Bug fix: the original sorted the *integer* (TypeError) and returned two
    # names that were never bound (NameError). Work on the digit string.
    str_n = str(_lowerCAmelCase)
    sorted_str_n = "".join(sorted(str_n))
    # Ascending digits -> sorted form equals the number; descending digits ->
    # reversed sorted form equals it. Bouncy means neither holds.
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def A_ ( _lowerCAmelCase : float = 99 ) -> int:
    """Return the least number at which the proportion of bouncy numbers
    reaches ``_lowerCAmelCase`` percent (Project Euler problem 112).

    Raises:
        ValueError: if the percentage is not strictly between 0 and 100.
    """
    if not 0 < _lowerCAmelCase < 100:
        raise ValueError("solution() only accepts values from 0 to 100" )

    def _check_bouncy(number: int) -> bool:
        # Local bouncy test. Bug fix: the original body called the undefined
        # name `check_bouncy` (lost during renaming), raising NameError.
        digits = str(number)
        ordered = "".join(sorted(digits))
        return ordered != digits and ordered[::-1] != digits

    bouncy_num = 0
    num = 1
    while True:
        if _check_bouncy(num):
            bouncy_num += 1
        # Stop at the first count where the bouncy ratio hits the target.
        if (bouncy_num / num) * 100 >= _lowerCAmelCase:
            return num
        num += 1
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Bug fix: the function was renamed to `A_` but this line still called the
    # undefined name `solution`, raising NameError when run as a script.
    print(f'''{A_(99)}''')
| 44
|
import random
from typing import Any
def UpperCAmelCase_ ( snake_case__ ) -> list[Any]:
    """Shuffle ``snake_case__`` in place by random pairwise swaps and return it.

    One swap of two random positions is performed per element. Bug fix: the
    original assigned both picked values to throw-away locals, so the list was
    never modified and the "shuffle" was a no-op.
    """
    for _ in range(len(snake_case__)):
        a = random.randint(0, len(snake_case__) - 1)
        b = random.randint(0, len(snake_case__) - 1)
        # Actually write the swap back into the list.
        snake_case__[a], snake_case__[b] = snake_case__[b], snake_case__[a]
    return snake_case__
if __name__ == "__main__":
    # Demo run. Bug fix: both lists were assigned to one reused obfuscated
    # name and the prints referenced the undefined names `integers`, `strings`
    # and `fisher_yates_shuffle`; restore working bindings and call the
    # function actually defined above.
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", UpperCAmelCase_(integers), UpperCAmelCase_(strings))
| 193
| 0
|
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def __a(SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
def is_in_circle(SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ) -> bool:
_lowerCAmelCase = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
_lowerCAmelCase = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(SCREAMING_SNAKE_CASE_ ) )
# The ratio of the area for circle to square is pi/4.
_lowerCAmelCase = proportion * 4
print(F'''The estimated value of pi is {pi_estimate}''' )
print(F'''The numpy value of pi is {pi}''' )
print(F'''The total error is {abs(pi - pi_estimate )}''' )
def __a(SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Callable[[float], float] , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : float = 1.0 , ):
'''simple docstring'''
return mean(
function_to_integrate(uniform(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) for _ in range(SCREAMING_SNAKE_CASE_ ) ) * (max_value - min_value)
def __a(SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : float = 1.0 ):
'''simple docstring'''
def identity_function(SCREAMING_SNAKE_CASE_ : float ) -> float:
return x
_lowerCAmelCase = area_under_curve_estimator(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = (max_value * max_value - min_value * min_value) / 2
print("******************" )
print(F'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
print(F'''Estimated value is {estimated_value}''' )
print(F'''Expected value is {expected_value}''' )
print(F'''Total error is {abs(estimated_value - expected_value )}''' )
print("******************" )
def __a(SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
def function_to_integrate(SCREAMING_SNAKE_CASE_ : float ) -> float:
return sqrt(4.0 - x * x )
_lowerCAmelCase = area_under_curve_estimator(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 0.0 , 2.0 )
print("******************" )
print("Estimating pi using area_under_curve_estimator" )
print(F'''Estimated value is {estimated_value}''' )
print(F'''Expected value is {pi}''' )
print(F'''Total error is {abs(estimated_value - pi )}''' )
print("******************" )
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 720
|
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class lowerCAmelCase_ :
    """Abstract base for generation streamers.

    Subclasses receive batches of token ids (first method) and a stream-end
    signal (second method).

    NOTE(review): both methods carry the same obfuscated name ``_snake_case``,
    so the second definition silently shadows the first — originally these
    were two distinct methods (``put`` and ``end``). Return annotations
    corrected: ``Tuple`` was not imported here (the original ``-> Tuple``
    raised NameError at class creation) and both methods only raise.
    """

    def _snake_case ( self , _lowerCAmelCase ) -> None:
        # Receive a new batch of generated token ids.
        raise NotImplementedError()

    def _snake_case ( self ) -> None:
        # Signal that generation has finished.
        raise NotImplementedError()
class lowerCAmelCase_(lowerCAmelCase_):
    """Streamer that decodes generated token ids incrementally and prints text
    as soon as complete words or lines are available.

    Bug fixes (the block was mangled by obfuscation):
    - ``__init__`` declared three parameters with one shared name (a
      SyntaxError) and assigned every attribute to a throw-away local instead
      of ``self``, so ``self.token_cache`` / ``self.print_len`` never existed;
    - all five methods shared the name ``_snake_case`` (each def shadowing the
      previous) while the bodies call ``self.on_finalized_text`` and
      ``self._is_chinese_char`` — those evidenced names are restored, plus
      ``put``/``end`` for the streamer entry points;
    - the base class ``__magic_name__`` was undefined; it now extends the base
      streamer class defined directly above.
    """

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs) -> None:
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value) -> None:
        """Receive new token ids, decode the cache, and emit finished text."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1" )
        elif len(value.shape) > 1:
            value = value[0]

        # Optionally swallow the first batch (the prompt echo).
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n" ):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" " ) + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self) -> None:
        """Flush any remaining cached text and mark the stream as finished."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False) -> None:
        """Print finalized text; subclasses may redirect it elsewhere."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp: int) -> bool:
        """Return True if the code point lies in a CJK ideograph block."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):
            return True
        return False
class lowerCAmelCase_(lowerCAmelCase_):
    """Streamer that stores finalized text chunks in a queue so callers can
    consume generation output by iterating over this object.

    Bug fixes (obfuscation damage): ``__init__`` had duplicate parameter names
    (SyntaxError) and assigned attributes to throw-away locals; the two
    ``_snake_case`` methods shadowed each other — restored as
    ``on_finalized_text`` (evidenced by the parent class's calls) and
    ``__next__`` (required for the iterator protocol that ``__iter__``
    advertises); the undefined base ``__magic_name__`` now points at the text
    streamer class defined directly above.
    """

    def __init__(self, tokenizer, skip_prompt: bool = False, timeout=None, **decode_kwargs) -> None:
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        # Sentinel placed on the queue to mark the end of the stream.
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False) -> None:
        """Enqueue finalized text; on stream end, also enqueue the sentinel."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
| 489
| 0
|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class UpperCamelCase( unittest.TestCase ):
    """Pipeline tests for text generation (PyTorch and TensorFlow backends).

    NOTE(review): this class was damaged by automated renaming — the two class
    attributes share one name (the second shadows the first; originally
    ``model_mapping`` / ``tf_model_mapping``), every method shares the name
    ``SCREAMING_SNAKE_CASE_`` (so only the last definition survives), several
    signatures declare duplicate parameter names (a SyntaxError), and many
    argument values were replaced by the undefined placeholder ``_a``
    (originally concrete values such as ``False``/``True``). The original
    intent is documented below; restoring the exact values needs the upstream
    file.
    """
    # NOTE(review): second assignment shadows the first — originally two
    # distinct attributes mapping frameworks to their causal-LM model classes.
    snake_case_ : List[Any] = MODEL_FOR_CAUSAL_LM_MAPPING
    snake_case_ : str = TF_MODEL_FOR_CAUSAL_LM_MAPPING
    @require_torch
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Any:
        '''Deterministic small-model checks on a tiny PyTorch CTRL model:
        single prompt, batched prompts, and tensor/batched tensor returns.'''
        __snake_case = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt" )
        # Using `do_sample=False` to force deterministic output
        __snake_case = text_generator("This is a test" , do_sample=_a )
        self.assertEqual(
            _a , [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ] , )
        __snake_case = text_generator(["This is a test", "This is a second test"] )
        self.assertEqual(
            _a , [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ] , )
        __snake_case = text_generator("This is a test" , do_sample=_a , num_return_sequences=2 , return_tensors=_a )
        self.assertEqual(
            _a , [
                {"generated_token_ids": ANY(_a )},
                {"generated_token_ids": ANY(_a )},
            ] , )
        __snake_case = text_generator.model.config.eos_token_id
        __snake_case = "<pad>"
        __snake_case = text_generator(
            ["This is a test", "This is a second test"] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , )
        self.assertEqual(
            _a , [
                [
                    {"generated_token_ids": ANY(_a )},
                    {"generated_token_ids": ANY(_a )},
                ],
                [
                    {"generated_token_ids": ANY(_a )},
                    {"generated_token_ids": ANY(_a )},
                ],
            ] , )
    @require_tf
    def SCREAMING_SNAKE_CASE_ ( self : int ) -> Union[str, Any]:
        '''Same deterministic small-model checks against the TensorFlow
        backend of the tiny CTRL model.'''
        __snake_case = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf" )
        # Using `do_sample=False` to force deterministic output
        __snake_case = text_generator("This is a test" , do_sample=_a )
        self.assertEqual(
            _a , [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ] , )
        __snake_case = text_generator(["This is a test", "This is a second test"] , do_sample=_a )
        self.assertEqual(
            _a , [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ] , )
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] ) -> int:
        '''Build the pipeline + example inputs for the shared test harness.
        NOTE(review): duplicate parameter names here are a SyntaxError —
        originally distinct (model, tokenizer, processor).'''
        __snake_case = TextGenerationPipeline(model=_a , tokenizer=_a )
        return text_generator, ["This is a test", "Another test"]
    def SCREAMING_SNAKE_CASE_ ( self : str ) -> Union[str, Any]:
        '''Check that `stop_sequence` truncates generation at the stop text.'''
        __snake_case = "Hello I believe in"
        __snake_case = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
        __snake_case = text_generator(_a )
        self.assertEqual(
            _a , [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}] , )
        __snake_case = text_generator(_a , stop_sequence=" fe" )
        self.assertEqual(_a , [{"generated_text": "Hello I believe in fe"}] )
    def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any ) -> Optional[int]:
        '''Shared assertions run against every model/tokenizer pair: full vs
        partial text returns, batching, mutually exclusive return flags,
        empty-prompt handling, and long-input strategies.
        NOTE(review): duplicate parameter names are a SyntaxError —
        originally (text_generator, _).'''
        __snake_case = text_generator.model
        __snake_case = text_generator.tokenizer
        __snake_case = text_generator("This is a test" )
        self.assertEqual(_a , [{"generated_text": ANY(_a )}] )
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
        __snake_case = text_generator("This is a test" , return_full_text=_a )
        self.assertEqual(_a , [{"generated_text": ANY(_a )}] )
        self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
        __snake_case = pipeline(task="text-generation" , model=_a , tokenizer=_a , return_full_text=_a )
        __snake_case = text_generator("This is a test" )
        self.assertEqual(_a , [{"generated_text": ANY(_a )}] )
        self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
        __snake_case = text_generator("This is a test" , return_full_text=_a )
        self.assertEqual(_a , [{"generated_text": ANY(_a )}] )
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
        __snake_case = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=_a )
        self.assertEqual(
            _a , [
                [{"generated_text": ANY(_a )}, {"generated_text": ANY(_a )}],
                [{"generated_text": ANY(_a )}, {"generated_text": ANY(_a )}],
            ] , )
        if text_generator.tokenizer.pad_token is not None:
            __snake_case = text_generator(
                ["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=_a )
            self.assertEqual(
                _a , [
                    [{"generated_text": ANY(_a )}, {"generated_text": ANY(_a )}],
                    [{"generated_text": ANY(_a )}, {"generated_text": ANY(_a )}],
                ] , )
        with self.assertRaises(_a ):
            __snake_case = text_generator("test" , return_full_text=_a , return_text=_a )
        with self.assertRaises(_a ):
            __snake_case = text_generator("test" , return_full_text=_a , return_tensors=_a )
        with self.assertRaises(_a ):
            __snake_case = text_generator("test" , return_text=_a , return_tensors=_a )
        # Empty prompt is slighly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            __snake_case = text_generator("" )
            self.assertEqual(_a , [{"generated_text": ANY(_a )}] )
        else:
            with self.assertRaises((ValueError, AssertionError) ):
                __snake_case = text_generator("" )
        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return
        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        __snake_case = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 1_0_0_0_0
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
                text_generator("This is a test" * 5_0_0 , max_new_tokens=2_0 )
            __snake_case = text_generator("This is a test" * 5_0_0 , handle_long_generation="hole" , max_new_tokens=2_0 )
            # Hole strategy cannot work
            with self.assertRaises(_a ):
                text_generator(
                    "This is a test" * 5_0_0 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 1_0 , )
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> str:
        '''Exercise `device_map="auto"` and dtype selection via both
        `model_kwargs` and first-class pipeline arguments.'''
        import torch
        # Classic `model_kwargs`
        __snake_case = pipeline(
            model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
        __snake_case = pipe("This is a test" )
        self.assertEqual(
            _a , [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ] , )
        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        __snake_case = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloataa )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
        __snake_case = pipe("This is a test" )
        self.assertEqual(
            _a , [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ] , )
        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        __snake_case = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
        __snake_case = pipe("This is a test" )
        self.assertEqual(
            _a , [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ] , )
    @require_torch
    @require_torch_gpu
    def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Any:
        '''Smoke test: an fp16 pipeline on GPU runs without error.'''
        import torch
        __snake_case = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.floataa )
        pipe("This is a test" )
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Tuple:
        '''Smoke test: sampling kwargs (`top_p`) are accepted with accelerate
        device mapping.'''
        import torch
        __snake_case = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.floataa )
        pipe("This is a test" , do_sample=_a , top_p=0.5 )
    def SCREAMING_SNAKE_CASE_ ( self : int ) -> Any:
        '''Verify the "Both `max_new_tokens`..." warning is logged only when
        both `max_length` and `max_new_tokens` are supplied.'''
        __snake_case = "Hello world"
        __snake_case = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
        if text_generator.model.framework == "tf":
            __snake_case = logging.get_logger("transformers.generation.tf_utils" )
        else:
            __snake_case = logging.get_logger("transformers.generation.utils" )
        __snake_case = "Both `max_new_tokens`" # The beggining of the message to be checked in this test
        # Both are set by the user -> log warning
        with CaptureLogger(_a ) as cl:
            __snake_case = text_generator(_a , max_length=1_0 , max_new_tokens=1 )
        self.assertIn(_a , cl.out )
        # The user only sets one -> no warning
        with CaptureLogger(_a ) as cl:
            __snake_case = text_generator(_a , max_new_tokens=1 )
        self.assertNotIn(_a , cl.out )
        with CaptureLogger(_a ) as cl:
            __snake_case = text_generator(_a , max_length=1_0 )
        self.assertNotIn(_a , cl.out )
| 371
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy import structure: submodule name -> public names it provides.
# Bug fix: the dict was bound to an obfuscated name while later code
# referenced the undefined `_import_structure` (NameError), the torch branch
# *replaced* the dict with a bare list instead of adding a key, and the
# `_LazyModule` was assigned to a throw-away variable instead of being
# installed in `sys.modules`, so lazy loading never took effect.
_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is present.
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 691
| 0
|
import copy
import os
from typing import TYPE_CHECKING, List, Union

if TYPE_CHECKING:
    pass

from ...configuration_utils import PretrainedConfig
from ...utils import logging

# Module-level logger. Bug fix: the original bound the logger to an obfuscated
# name and then immediately overwrote that name with the config map, so the
# `logger.warning(...)` / `logger.info(...)` calls in the classes below raised
# NameError.
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> config URL (kept under its original name so any
# external references keep working).
lowerCamelCase = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class _a(PretrainedConfig):
    """Configuration for the ALIGN text tower (a BERT-style encoder).

    Bug fixes (obfuscation damage): every ``__init__`` parameter shared one
    name (a SyntaxError — duplicate argument) while the body already used the
    real names, which are restored here together with the defaults visible in
    the original signature; the undefined base ``SCREAMING_SNAKE_CASE`` is
    restored to ``PretrainedConfig`` (imported above); the ``model_type``
    class attribute was renamed to ``A`` and is restored so
    ``PretrainedConfig`` machinery can use it.
    """

    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def _A(cls, pretrained_model_name_or_path, **kwargs):
        """Load this config from a pretrained checkpoint, unwrapping the
        ``text_config`` section when the checkpoint is a composite ALIGN
        config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type" ) == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )

        return cls.from_dict(config_dict, **kwargs)
class _a(PretrainedConfig):
    """Configuration for the ALIGN vision tower (an EfficientNet backbone).

    Bug fixes (obfuscation damage): all ``__init__`` parameters shared one
    name (a SyntaxError); names are restored from the body's attribute
    assignments, defaults from the original signature. Base class restored to
    ``PretrainedConfig`` and the ``model_type`` attribute restored from ``A``.
    List defaults mirror the original signature (they are only stored, never
    mutated here).
    """

    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels=3,
        image_size=600,
        width_coefficient=2.0,
        depth_coefficient=3.1,
        depth_divisor=8,
        kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
        in_channels=[32, 16, 24, 40, 80, 112, 192],
        out_channels=[16, 24, 40, 80, 112, 192, 320],
        depthwise_padding=[],
        strides=[1, 2, 2, 2, 1, 2, 1],
        num_block_repeats=[1, 2, 2, 3, 3, 4, 1],
        expand_ratios=[1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio=0.25,
        hidden_act="swish",
        hidden_dim=2560,
        pooling_type="mean",
        initializer_range=0.02,
        batch_norm_eps=0.001,
        batch_norm_momentum=0.99,
        drop_connect_rate=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        # Each repeated block expands into 4 hidden layers.
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def _A(cls, pretrained_model_name_or_path, **kwargs):
        """Load this config from a pretrained checkpoint, unwrapping the
        ``vision_config`` section when the checkpoint is a composite ALIGN
        config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type" ) == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )

        return cls.from_dict(config_dict, **kwargs)
class _a(PretrainedConfig):
    """Composite ALIGN configuration holding a text and a vision sub-config.

    Bug fixes (obfuscation damage): duplicate ``__init__`` parameter names
    (SyntaxError) restored from the body; base class restored to
    ``PretrainedConfig``; the two class attributes restored from ``A`` to
    ``model_type`` / ``is_composition``; of the two methods both named ``_A``
    (the second silently shadowed the first), the serializer is restored to
    ``to_dict`` — its body copies ``__dict__`` and re-inserts ``model_type``,
    which is exactly the ``PretrainedConfig.to_dict`` contract.

    NOTE(review): ``AlignTextConfig`` / ``AlignVisionConfig`` must resolve to
    the two config classes above, which are currently also named ``_a`` —
    confirm the intended public names when de-obfuscating the whole module.
    """

    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values." )

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." )

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def _A(cls, text_config, vision_config, **kwargs):
        """Build a composite config from already-instantiated text and vision
        configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 207
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union

import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
    WavaVecaForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices

if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    # Flag kept under its original (obfuscated) name; native AMP autocast is
    # only importable on torch >= 1.6.
    lowerCamelCase = True
    from torch.cuda.amp import autocast

# Bug fix: the script logs through `logger` (see configure_logger below), but
# only the obfuscated name was ever bound, so `logger.setLevel(...)` raised
# NameError. Bind both names; `lowerCamelCase` keeps its final original value.
logger = logging.getLogger(__name__)
lowerCamelCase = logger
@dataclass
class _a :
'''simple docstring'''
A :str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
A :Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
A :Optional[bool] = field(
default=SCREAMING_SNAKE_CASE , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
A :Optional[bool] = field(
default=SCREAMING_SNAKE_CASE , metadata={"help": "Whether to log verbose messages or not."} , )
A :Optional[float] = field(
default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} )
A :Optional[float] = field(
default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} )
A :Optional[float] = field(
default=0.99_9995 , metadata={"help": "Decay of gumbel temperature during training."} )
def SCREAMING_SNAKE_CASE(model_args, training_args) -> None:
    """Configure logging for the pretraining script.

    Log level is DEBUG with ``--verbose_logging``, INFO on the main
    distributed process, WARNING otherwise.

    Bug fix: the two parameters shared one obfuscated name (a SyntaxError —
    duplicate argument) while the body already used ``model_args`` /
    ``training_args``; those names are restored.
    """
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class _a :
'''simple docstring'''
A :str = field(
default=SCREAMING_SNAKE_CASE , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
A :Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
A :Optional[str] = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
A :Optional[str] = field(
default="validation" , metadata={
"help": (
"The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
A :Optional[str] = field(
default="file" , metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"} , )
A :bool = field(
default=SCREAMING_SNAKE_CASE , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
A :Optional[int] = field(
default=1 , metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
} , )
A :Optional[int] = field(
default=SCREAMING_SNAKE_CASE , metadata={"help": "The number of processes to use for the preprocessing."} , )
A :Optional[float] = field(
default=20.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} )
@dataclass
class _a :
'''simple docstring'''
A :WavaVecaForPreTraining
A :WavaVecaFeatureExtractor
A :Union[bool, str] = "longest"
A :Optional[int] = None
A :Optional[int] = None
def __call__( self , __UpperCAmelCase ):
"""simple docstring"""
a__ : Dict = self.feature_extractor.pad(
__UpperCAmelCase , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
a__ : Optional[int] = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1] )
a__ : Optional[int] = batch["input_values"].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
a__ : Any = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1 ) ).to(
torch.long )
a__ : List[str] = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["input_values"].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
a__ : List[Any] = 1
a__ : Optional[int] = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
a__ : Union[str, Any] = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=__UpperCAmelCase , min_masks=2 , )
return batch
class _a(Trainer):
    """Trainer subclass for wav2vec2 pretraining that decays the gumbel
    softmax temperature after every training step.

    Bug fixes (obfuscation damage): ``__init__`` declared the three
    keyword-only parameters with one shared name (a SyntaxError — duplicate
    argument); names restored from the attribute assignments. The undefined
    base ``SCREAMING_SNAKE_CASE`` is restored to ``Trainer`` (imported above),
    and the step override is restored from ``_A`` to ``training_step`` —
    otherwise ``Trainer.train`` would never call it.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model, inputs):
        """Run one training step (forward, loss scaling, backward) and decay
        the model's gumbel temperature."""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f'{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']' )

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        return loss.detach()
def SCREAMING_SNAKE_CASE():
    """Entry point for wav2vec2-style pretraining: parse args, load/normalize
    the audio dataset, build the model + collator, and train.

    NOTE(review): mechanical renaming had collapsed every local into one name,
    leaving ``model_args``/``data_args``/``training_args``/``batch``/``data``
    undefined at their use sites; names restored from those use sites.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f'{data_args.train_split_name}[:{data_args.validation_split_percentage}%]',
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f'{data_args.train_split_name}[{data_args.validation_split_percentage}%:]',
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f'{data_args.train_split_name}',
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], batch["sampling_rate"] = librosa.load(
            batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate
        )
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = WavaVecaForPreTraining(config)
    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)
    # NOTE(review): the pretraining Trainer subclass defined above in this file
    # is named ``_a``; the original call referenced an undefined name.
    trainer = _a(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


# Backward-compatible alias so the script guard (and any external reference to
# ``main``) resolves.
main = SCREAMING_SNAKE_CASE

if __name__ == "__main__":
    main()
| 207
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this file.
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)

# Checkpoint name -> hosted config.json URL for the Swinv2 family.
# NOTE(review): this assignment reuses the logger's name above and therefore
# clobbers it — the two bindings were presumably distinct names originally.
SCREAMING_SNAKE_CASE = {
    'microsoft/swinv2-tiny-patch4-window8-256': (
        'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
    ),
}
class UpperCAmelCase_ ( __A ):
    """Configuration holder for a Swinv2-style model: stores the architecture
    hyper-parameters passed to ``__init__`` as attributes."""

    # NOTE(review): both class attributes below share one placeholder name, so
    # the dict overwrites the string — they were presumably distinct names
    # (model type tag and attribute map) originally; left byte-identical.
    UpperCamelCase_ = '''swinv2'''
    UpperCamelCase_ = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        # NOTE(review): the original signature reused one placeholder for every
        # parameter (duplicate-argument SyntaxError); names restored from the
        # attribute assignments below, defaults kept unchanged.
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 94
|
def _lowercase(decimal):
    """Recursively convert a non-negative integer to its binary digit string.

    >>> binary_recursive(10)
    '1010'
    """
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    quotient, remainder = divmod(decimal, 2)
    # Recurse through the stable alias below: the bare name ``_lowercase`` may
    # be rebound later in the module, which would silently redirect the
    # recursion; the alias always points at this function object.
    return binary_recursive(quotient) + str(remainder)


# Public, shadow-proof alias for the recursive converter.
binary_recursive = _lowercase
def _lowercase(number):
    """Convert an integer (given as int or numeric string, optionally signed)
    to a ``0b``-prefixed binary string, e.g. ``-10`` -> ``'-0b1010'``.

    Raises:
        ValueError: if the input is empty or not an integer.
    """
    number = str(number).strip()
    if not number:
        raise ValueError("""No input value was provided""")
    negative = """-""" if number.startswith("""-""") else """"""
    number = number.lstrip("""-""")
    if not number.isnumeric():
        raise ValueError("""Input value is not an integer""")
    # Use the built-in binary presentation type instead of the hand-rolled
    # recursive helper the original referenced (which was not defined under
    # that name in this module); output is identical.
    return f'{negative}0b{int(number):b}'
if __name__ == "__main__":
    # Executed as a script: run this module's doctests.
    from doctest import testmod

    testmod()
| 648
| 0
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class UpperCamelCase__( lowerCAmelCase__ ):
    """Single-field output container.

    NOTE(review): mechanical renaming appears to have destroyed this block —
    the field is now a bare ``_A = 42`` with no annotation and the base-class
    name is a placeholder.  Presumably this was a one-field annotated output
    dataclass (e.g. holding a decoded tensor) — confirm against the original.
    """

    # Placeholder left by the renaming; with no annotation this is a plain
    # class attribute, not a dataclass field.
    _A = 42
class UpperCamelCase__( nn.Module ):
    """Convolutional encoder: conv_in -> down blocks -> mid block -> norm/act -> conv_out.

    NOTE(review): parameter, attribute and local names were reconstructed from
    the names the method bodies read (``layers_per_block``,
    ``block_out_channels``, ``double_z``, ``self.down_blocks`` …); defaults
    are unchanged.
    """

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block
        # (fixed) torch.nn has no ``Convad``; the 2-D convolution is ``Conv2d``.
        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)
        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlockaD(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)
        self.gradient_checkpointing = False

    def forward(self, x):
        """Encode ``x``; uses gradient checkpointing when training with it enabled.

        NOTE(review): renamed from the placeholder ``_a`` — ``nn.Module.__call__``
        dispatches to ``forward``, so the method is unreachable under any other
        name.
        """
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    # (fixed) ``use_reentrant`` was passed the input tensor;
                    # it is a boolean flag.
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class UpperCamelCase__( nn.Module ):
    """Convolutional decoder: conv_in -> mid block -> up blocks -> norm/act -> conv_out.

    NOTE(review): parameter, attribute and local names were reconstructed from
    the names the method bodies read; defaults are unchanged.
    """

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",
    ):
        super().__init__()
        self.layers_per_block = layers_per_block
        # (fixed) torch.nn has no ``Convad``; the 2-D convolution is ``Conv2d``.
        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        # spatial norm conditions on the latent channels; group norm does not
        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlockaD(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        """Decode latent ``z`` (optionally conditioned on ``latent_embeds`` for
        spatial norm); uses gradient checkpointing when training with it enabled.

        NOTE(review): renamed from the placeholder ``_a`` — ``nn.Module.__call__``
        dispatches to ``forward``.
        """
        sample = z
        sample = self.conv_in(sample)

        # run the up blocks in the dtype of their parameters
        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class UpperCamelCase__( nn.Module ):
    """Vector-quantization layer: maps each spatial vector of the input to its
    nearest codebook embedding, with an optional index remap table.

    NOTE(review): four methods were all named ``_a`` (shadowing each other);
    names restored from the internal call sites (``remap_to_used`` /
    ``unmap_to_all``) and nn.Module's ``forward`` dispatch.  Parameter names
    were reconstructed from the attribute assignments; defaults unchanged.
    """

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e                     # codebook size
        self.vq_embed_dim = vq_embed_dim   # embedding dimensionality
        self.beta = beta                   # commitment-loss weight
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
                f'''Using {self.unknown_index} for unknown indices.'''
            )
        else:
            self.re_embed = n_e
        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        """Map raw codebook indices to remapped indices; unknown entries get
        ``unknown_index`` (or a random valid index)."""
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        """Inverse of :meth:`remap_to_used`: map remapped indices back to raw ones."""
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        """Quantize ``z`` (NCHW); returns ``(z_q, loss, (perplexity, min_encodings, indices))``."""
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)
        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        """Look up codebook vectors for ``indices`` and reshape to ``shape`` (NHWC -> NCHW)."""
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()
        return z_q
class UpperCamelCase__( lowerCAmelCase__ ):
    """Diagonal Gaussian over a tensor, parameterized by (mean, logvar) stacked
    along dim=1 of ``parameters``.

    NOTE(review): four methods were all named ``_a`` (shadowing each other,
    leaving only the last reachable); restored as sample/kl/nll/mode based on
    the formula each body computes — confirm against the original module.
    """

    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        # split (mean, logvar) stacked along the channel dim
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            # zero variance -> sampling always returns the mean
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator=None):
        """Draw one reparameterized sample: mean + std * eps."""
        noise = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * noise
        return x

    def kl(self, other=None):
        """KL divergence to a standard normal (``other=None``) or to ``other``,
        summed over dims [1, 2, 3]."""
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        """Negative log-likelihood of ``sample`` under this Gaussian, summed over ``dims``."""
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        """The distribution mode — for a Gaussian, the mean."""
        return self.mean
| 689
|
from __future__ import annotations
import math
def UpperCamelCase_(u, p) -> float:
    """Return u * (u-1) * ... * (u-p+1), the falling product used by Newton's
    forward-difference interpolation formula.

    NOTE(review): the original signature reused one placeholder for both
    parameters (duplicate-argument SyntaxError) and dropped the ``temp`` local.
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def main() -> None:
    """Interactively read a value table and evaluate Newton's forward-difference
    interpolation at a requested point.

    NOTE(review): renamed from a placeholder that duplicated (and shadowed) the
    helper's name above — the ``__main__`` guard below invokes ``main``.
    Lost local names (n, y, x, value, u, summ) restored from their use sites.
    """
    n = int(input("enter the numbers of values: "))
    y = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    # normalized offset from the first tabulated point
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        # UpperCamelCase_ is the falling-product helper defined above
        summ += (UpperCamelCase_(u, i) * y[0][i]) / math.factorial(i)
    print(f'''the value at {value} is {summ}''')


if __name__ == "__main__":
    main()
| 689
| 1
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    """ConfigTester specialization for Cvt configs.

    NOTE(review): class/base/method names restored from the visible call sites
    (``TFCvtConfigTester(...)``, ``create_and_test_config_common_properties()``)
    and the ``ConfigTester`` import; ``hasattr`` was being passed an undefined
    name instead of the constructed config.
    """

    def create_and_test_config_common_properties(self):
        # Cvt configs must expose per-stage embed dims and head counts.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'embed_dim'))
        self.parent.assertTrue(hasattr(config, 'num_heads'))
class TFCvtModelTester:
    """Builds small random configs/inputs for TF Cvt model tests.

    NOTE(review): the original ``__init__`` reused one placeholder for all 17
    parameters (duplicate-argument SyntaxError) and all five methods shared one
    name; names restored from the attribute assignments and the call sites in
    the test class below.  Defaults are unchanged.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        # the final feature map is downsampled once per stage
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard model-tester suite for TF Cvt.

    NOTE(review): all class attributes and methods shared single placeholder
    names (so only the last of each survived, and no ``test_*`` method existed
    for unittest to collect).  Names restored from the mixin contract, the
    visible ``super().test_dataset_conversion()`` / ``super().test_keras_fit()``
    calls, and the visible mixin imports.
    """

    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFCvtModel, 'image-classification': TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason='Cvt does not output attentions')
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason='Cvt does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='Cvt does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('GPU')) == 0,
        reason='TF does not support backprop for grouped convolutions on CPU.',
    )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('GPU')) == 0,
        reason='TF does not support backprop for grouped convolutions on CPU.',
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8')
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy('mixed_float16')
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        # restore the default policy so later tests are unaffected
        tf.keras.mixed_precision.set_global_policy('float32')

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO fixture image used by the integration test.

    NOTE(review): renamed from the placeholder ``lowerCamelCase__`` — the
    visible call site invokes ``prepare_img()``.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the hosted Cvt checkpoint.

    NOTE(review): both methods shared one placeholder name (the second def
    shadowed the first, and neither matched unittest's ``test_*`` pattern);
    names restored from the ``self.default_image_processor`` use site and
    unittest discovery requirements.
    """

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 73
|
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
# Pinned version string — presumably the minimum flax version these tests
# require (with parallelism 8, per the inline note); TODO confirm what reads it.
lowerCAmelCase_ = """0.12"""  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    """Staging-hub round-trip tests: push a tiny Flax BERT and verify the
    downloaded parameters match.

    NOTE(review): all four methods shared one placeholder name, so only the
    last survived and the classmethod fixtures never ran; restored as
    setUpClass/tearDownClass (required by unittest) and ``test_*`` methods.
    Lost locals (model/new_model/base_params/new_params) restored from use sites.
    """

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # best-effort cleanup: the repos may not exist if a test failed early
        try:
            delete_repo(token=cls._token, repo_id='''test-model-flax''')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='''valid_org/test-model-flax-org''')
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub('''test-model-flax''', use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"""{key} not identical""")

        # Reset repo
        delete_repo(token=self._token, repo_id='''test-model-flax''')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id='''test-model-flax''', push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"""{key} not identical""")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub('''valid_org/test-model-flax-org''', use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''')
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"""{key} not identical""")

        # Reset repo
        delete_repo(token=self._token, repo_id='''valid_org/test-model-flax-org''')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id='''valid_org/test-model-flax-org''', push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''')
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"""{key} not identical""")
def __lowerCAmelCase ( modela , modelb ) -> bool:
    """Return True when two Flax models hold numerically identical parameters.

    Each model's parameter pytree is flattened to a ``{path: array}`` mapping
    and every array pair is compared with an absolute-sum tolerance of 1e-4.

    The scrambled original declared two parameters with the same name and
    flattened the same model twice, so it compared every tensor against
    itself and could never return False.

    Args:
        modela: first model; must expose ``.params`` (a nested dict pytree).
        modelb: second model to compare against, same structure as ``modela``.

    Returns:
        bool: True if all corresponding parameter arrays match within 1e-4.
    """
    models_are_equal = True
    flat_params_a = flatten_dict(modela.params )
    flat_params_b = flatten_dict(modelb.params )
    for key in flat_params_a.keys():
        # Sum of absolute element-wise differences for this parameter tensor.
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1E-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
    """Tests loading Flax BERT checkpoints that live in a repo *subfolder*.

    Covers: a locally saved model in a subfolder, a locally saved *sharded*
    model in a subfolder, and Hub repos with (sharded and unsharded)
    subfolder checkpoints.  Each test first asserts that loading WITHOUT
    ``subfolder=`` raises, then that loading WITH it succeeds.

    NOTE(review): all four test methods share the name ``__magic_name__``
    (later defs shadow earlier ones) and reference the undefined
    ``__UpperCAmelCase`` — artifacts of automated renaming; restore the
    original method/variable names before running.
    """

    def __magic_name__( self ):
        # Save an unsharded model into <tmp>/bert and load it back via subfolder=.
        lowerCAmelCase__ : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        lowerCAmelCase__ : List[str] = FlaxBertModel(__UpperCAmelCase )
        lowerCAmelCase__ : Optional[int] = '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) )
            with self.assertRaises(__UpperCAmelCase ):
                lowerCAmelCase__ : Optional[int] = FlaxBertModel.from_pretrained(__UpperCAmelCase )
            lowerCAmelCase__ : List[str] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase )
        self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) )

    def __magic_name__( self ):
        # Same as above but sharded: max_shard_size='10KB' forces multiple shards.
        lowerCAmelCase__ : Tuple = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        lowerCAmelCase__ : Union[str, Any] = FlaxBertModel(__UpperCAmelCase )
        lowerCAmelCase__ : Dict = '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , max_shard_size='''10KB''' )
            with self.assertRaises(__UpperCAmelCase ):
                lowerCAmelCase__ : Tuple = FlaxBertModel.from_pretrained(__UpperCAmelCase )
            lowerCAmelCase__ : Any = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase )
        self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) )

    def __magic_name__( self ):
        # Hub repo whose checkpoint sits under a 'bert' subfolder.
        lowerCAmelCase__ : List[str] = '''bert'''
        lowerCAmelCase__ : int = '''hf-internal-testing/tiny-random-bert-subfolder'''
        with self.assertRaises(__UpperCAmelCase ):
            lowerCAmelCase__ : Dict = FlaxBertModel.from_pretrained(__UpperCAmelCase )
        lowerCAmelCase__ : Optional[int] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase )
        self.assertIsNotNone(__UpperCAmelCase )

    def __magic_name__( self ):
        # Hub repo with a *sharded* checkpoint under a 'bert' subfolder.
        lowerCAmelCase__ : List[Any] = '''bert'''
        lowerCAmelCase__ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
        with self.assertRaises(__UpperCAmelCase ):
            lowerCAmelCase__ : Union[str, Any] = FlaxBertModel.from_pretrained(__UpperCAmelCase )
        lowerCAmelCase__ : Optional[Any] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase )
        self.assertIsNotNone(__UpperCAmelCase )
| 678
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class SCREAMING_SNAKE_CASE:
    """A binary-tree node: an ``int`` payload plus optional left/right children.

    The traversal helpers in this module read ``.data``, ``.left`` and
    ``.right``.  The scrambled original collapsed all three fields into a
    single repeated name, which is invalid in a dataclass body; the field
    names are restored here from those call sites.
    """

    data: int
    left: Node | None = None
    right: Node | None = None


# The rest of the module constructs and annotates nodes via the name ``Node``.
Node = SCREAMING_SNAKE_CASE
def make_tree():
    """Build and return the fixed 5-node example tree used by ``main``.

            1
           / \\
          2   3
         / \\
        4   5

    The scrambled original assigned five nodes to one local and returned an
    undefined name; the wiring is restored here.  The def is renamed from
    the collapsed ``snake_case_`` to ``make_tree``, the name ``main`` calls.
    """
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder(root: Node | None):
    """Return node values in root -> left -> right order; [] for an empty tree.

    Renamed from the collapsed ``snake_case_`` to ``preorder`` — the name its
    own recursive calls (and ``main``) already use.
    """
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder(root: Node | None):
    """Return node values in left -> right -> root order; [] for an empty tree.

    Renamed from the collapsed ``snake_case_`` to ``postorder`` — the name its
    own recursive calls (and ``main``) already use.
    """
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder(root: Node | None):
    """Return node values in left -> root -> right order; [] for an empty tree.

    Renamed from the collapsed ``snake_case_`` to ``inorder`` — the name its
    own recursive calls (and ``main``) already use.
    """
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height(root: Node | None):
    """Return the height of the tree in nodes (0 for an empty tree).

    Renamed from the collapsed ``snake_case_`` to ``height`` — the name its
    own recursive calls (and ``zigzag``/``main``) already use.
    """
    return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def level_order(root: Node | None):
    """Breadth-first (level-order) traversal: values top->bottom, left->right.

    Uses a deque as a FIFO queue; ``popleft`` keeps each dequeue O(1).
    Renamed from the collapsed ``snake_case_`` to ``level_order``, the name
    ``main`` calls; the local queue/output names are restored from the body.
    """
    output: list[int] = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def get_nodes_from_left_to_right(root: Node | None , level: int):
    """Return the values of all nodes at *level* (1-based; root is level 1), left to right.

    Renamed from the collapsed ``snake_case_`` (and its duplicate parameters)
    to the name/signature that ``zigzag`` and ``main`` call, including the
    ``level=`` keyword.
    """
    output: list[int] = []

    def populate_output(root: Node | None , level: int) -> None:
        # Recursive descent: each step down decrements the remaining level.
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )

    populate_output(root , level )
    return output
def get_nodes_from_right_to_left(root: Node | None , level: int):
    """Return the values of all nodes at *level* (1-based; root is level 1), right to left.

    Mirror of ``get_nodes_from_left_to_right`` — the right subtree is visited
    first.  Renamed from the collapsed ``snake_case_`` (and its duplicate
    parameters) to the name/signature that ``zigzag`` calls.
    """
    output: list[int] = []

    def populate_output(root: Node | None , level: int) -> None:
        # Same descent as the left-to-right variant, with children swapped.
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )

    populate_output(root , level )
    return output
def zigzag(root: Node | None):
    """Zig-zag level order: level 1 left->right, level 2 right->left, alternating.

    Returns a list of per-level value lists; [] for an empty tree.  Renamed
    from the collapsed ``snake_case_`` to ``zigzag``, the name ``main`` calls;
    the flag/height locals are restored from the body's control flow.
    """
    if root is None:
        return []
    output = []
    flag = 0  # 0 -> traverse this level left-to-right, 1 -> right-to-left
    height_tree = height(root )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0
    return output
def main():  # Main function for testing.
    """Build the demo tree and print every traversal implemented above.

    Renamed from the collapsed ``snake_case_`` to ``main`` (called by the
    ``__main__`` guard); the undefined ``a__`` references are rebound to the
    local ``tree`` produced by ``make_tree``.
    """
    tree = make_tree()
    print(f'In-order Traversal: {inorder(tree )}' )
    print(f'Pre-order Traversal: {preorder(tree )}' )
    print(f'Post-order Traversal: {postorder(tree )}' , """\n""" )
    print(f'Height of Tree: {height(tree )}' , """\n""" )
    print("""Complete Level Order Traversal: """ )
    print(level_order(tree ) , """\n""" )
    print("""Level-wise order Traversal: """ )
    for level in range(1 , height(tree ) + 1 ):
        print(f'Level {level}:' , get_nodes_from_left_to_right(tree , level=level ) )
    print("""\nZigZag order Traversal: """ )
    print(zigzag(tree ) )
if __name__ == "__main__":
    # Run the module's doctests first, then the interactive demo.
    import doctest

    doctest.testmod()
    main()
| 700
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
A : List[str] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE( __A ):
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> None:
"""simple docstring"""
warnings.warn(
"""The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use MobileViTImageProcessor instead.""" , lowerCamelCase__ , )
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
| 163
| 0
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class lowercase ( A__ ):
    """Output container returned by the decoder.

    NOTE(review): automated renaming reduced the single class attribute to a
    bare constant; upstream this is presumably an annotated tensor field
    (e.g. ``sample: torch.FloatTensor``) — confirm against the original
    before relying on this dataclass.
    """

    __SCREAMING_SNAKE_CASE = 42
class lowercase ( nn.Module ):
    """VAE-style convolutional encoder: conv_in -> down blocks -> mid block -> norm/act -> conv_out.

    NOTE(review): automated renaming collapsed every ``__init__`` parameter
    to ``_snake_case`` (duplicate parameter names are a SyntaxError) and
    every assignment target to ``UpperCAmelCase``; the body reads names the
    signature no longer declares (``layers_per_block``,
    ``block_out_channels``, ``double_z``, ...).  The code is kept
    byte-for-byte; restore the original signature before executing.
    """

    def __init__( self , _snake_case=3 , _snake_case=3 , _snake_case=("DownEncoderBlock2D",) , _snake_case=(64,) , _snake_case=2 , _snake_case=32 , _snake_case="silu" , _snake_case=True , ) -> Dict:
        """Assemble conv_in, the down-block stack, the mid block and the output head."""
        super().__init__()
        UpperCAmelCase = layers_per_block
        UpperCAmelCase = torch.nn.Convad(
            _snake_case , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
        UpperCAmelCase = None
        UpperCAmelCase = nn.ModuleList([] )
        # down
        UpperCAmelCase = block_out_channels[0]
        for i, down_block_type in enumerate(_snake_case ):
            UpperCAmelCase = output_channel
            UpperCAmelCase = block_out_channels[i]
            UpperCAmelCase = i == len(_snake_case ) - 1
            UpperCAmelCase = get_down_block(
                _snake_case , num_layers=self.layers_per_block , in_channels=_snake_case , out_channels=_snake_case , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=_snake_case , resnet_groups=_snake_case , attention_head_dim=_snake_case , temb_channels=_snake_case , )
            self.down_blocks.append(_snake_case )
        # mid
        UpperCAmelCase = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_snake_case , output_scale_factor=1 , resnet_time_scale_shift='''default''' , attention_head_dim=block_out_channels[-1] , resnet_groups=_snake_case , temb_channels=_snake_case , )
        # out
        UpperCAmelCase = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=_snake_case , eps=1e-6 )
        UpperCAmelCase = nn.SiLU()
        UpperCAmelCase = 2 * out_channels if double_z else out_channels
        UpperCAmelCase = nn.Convad(block_out_channels[-1] , _snake_case , 3 , padding=1 )
        UpperCAmelCase = False

    def snake_case_ ( self , _snake_case ) -> int:
        """Forward pass; routes through torch gradient checkpointing when enabled."""
        UpperCAmelCase = x
        UpperCAmelCase = self.conv_in(_snake_case )
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(_snake_case ):
                # Wrapper so torch.utils.checkpoint can re-invoke the module.
                def custom_forward(*_snake_case ):
                    return module(*_snake_case )

                return custom_forward

            # down
            if is_torch_version('''>=''' , '''1.11.0''' ):
                # torch >= 1.11 supports the use_reentrant keyword.
                for down_block in self.down_blocks:
                    UpperCAmelCase = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(_snake_case ) , _snake_case , use_reentrant=_snake_case )
                # middle
                UpperCAmelCase = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , _snake_case , use_reentrant=_snake_case )
            else:
                for down_block in self.down_blocks:
                    UpperCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(_snake_case ) , _snake_case )
                # middle
                UpperCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , _snake_case )
        else:
            # down
            for down_block in self.down_blocks:
                UpperCAmelCase = down_block(_snake_case )
            # middle
            UpperCAmelCase = self.mid_block(_snake_case )
        # post-process
        UpperCAmelCase = self.conv_norm_out(_snake_case )
        UpperCAmelCase = self.conv_act(_snake_case )
        UpperCAmelCase = self.conv_out(_snake_case )
        return sample
class lowercase ( nn.Module ):
    """VAE-style convolutional decoder: conv_in -> mid block -> up blocks -> norm/act -> conv_out.

    Supports an optional ``latent_embeds`` conditioning input and a
    ``norm_type`` of ``"group"`` or ``"spatial"`` (the latter switches the
    output norm to ``SpatialNorm``).

    NOTE(review): as with the encoder, automated renaming collapsed all
    ``__init__`` parameters to ``_snake_case`` and all assignment targets to
    ``UpperCAmelCase``; the body reads names the signature no longer
    declares.  Kept byte-for-byte; restore the original signature before use.
    """

    def __init__( self , _snake_case=3 , _snake_case=3 , _snake_case=("UpDecoderBlock2D",) , _snake_case=(64,) , _snake_case=2 , _snake_case=32 , _snake_case="silu" , _snake_case="group" , ) -> int:
        """Assemble conv_in, the mid block, the up-block stack and the output head."""
        super().__init__()
        UpperCAmelCase = layers_per_block
        UpperCAmelCase = nn.Convad(
            _snake_case , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
        UpperCAmelCase = None
        UpperCAmelCase = nn.ModuleList([] )
        UpperCAmelCase = in_channels if norm_type == '''spatial''' else None
        # mid
        UpperCAmelCase = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_snake_case , output_scale_factor=1 , resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=_snake_case , temb_channels=_snake_case , )
        # up
        UpperCAmelCase = list(reversed(_snake_case ) )
        UpperCAmelCase = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(_snake_case ):
            UpperCAmelCase = output_channel
            UpperCAmelCase = reversed_block_out_channels[i]
            UpperCAmelCase = i == len(_snake_case ) - 1
            UpperCAmelCase = get_up_block(
                _snake_case , num_layers=self.layers_per_block + 1 , in_channels=_snake_case , out_channels=_snake_case , prev_output_channel=_snake_case , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=_snake_case , resnet_groups=_snake_case , attention_head_dim=_snake_case , temb_channels=_snake_case , resnet_time_scale_shift=_snake_case , )
            self.up_blocks.append(_snake_case )
            UpperCAmelCase = output_channel
        # out
        if norm_type == "spatial":
            UpperCAmelCase = SpatialNorm(block_out_channels[0] , _snake_case )
        else:
            UpperCAmelCase = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=_snake_case , eps=1e-6 )
        UpperCAmelCase = nn.SiLU()
        UpperCAmelCase = nn.Convad(block_out_channels[0] , _snake_case , 3 , padding=1 )
        UpperCAmelCase = False

    def snake_case_ ( self , _snake_case , _snake_case=None ) -> Any:
        """Forward pass; second argument is the optional latent conditioning."""
        UpperCAmelCase = z
        UpperCAmelCase = self.conv_in(_snake_case )
        UpperCAmelCase = next(iter(self.up_blocks.parameters() ) ).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(_snake_case ):
                # Wrapper so torch.utils.checkpoint can re-invoke the module.
                def custom_forward(*_snake_case ):
                    return module(*_snake_case )

                return custom_forward

            if is_torch_version('''>=''' , '''1.11.0''' ):
                # middle
                UpperCAmelCase = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , _snake_case , _snake_case , use_reentrant=_snake_case )
                UpperCAmelCase = sample.to(_snake_case )
                # up
                for up_block in self.up_blocks:
                    UpperCAmelCase = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(_snake_case ) , _snake_case , _snake_case , use_reentrant=_snake_case )
            else:
                # middle
                UpperCAmelCase = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , _snake_case , _snake_case )
                UpperCAmelCase = sample.to(_snake_case )
                # up
                for up_block in self.up_blocks:
                    UpperCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(_snake_case ) , _snake_case , _snake_case )
        else:
            # middle
            UpperCAmelCase = self.mid_block(_snake_case , _snake_case )
            UpperCAmelCase = sample.to(_snake_case )
            # up
            for up_block in self.up_blocks:
                UpperCAmelCase = up_block(_snake_case , _snake_case )
        # post-process
        if latent_embeds is None:
            UpperCAmelCase = self.conv_norm_out(_snake_case )
        else:
            UpperCAmelCase = self.conv_norm_out(_snake_case , _snake_case )
        UpperCAmelCase = self.conv_act(_snake_case )
        UpperCAmelCase = self.conv_out(_snake_case )
        return sample
class lowercase ( nn.Module ):
    """VQ-VAE vector quantizer with an optional index-remapping table.

    Quantizes each latent vector to its nearest codebook embedding, returns a
    commitment loss (weighted by ``beta``; ``legacy`` flips which term gets
    the weight) and uses a straight-through estimator to preserve gradients.
    When ``remap`` points at a saved index array, codebook indices are
    translated to/from the remapped space, with unknown indices resolved per
    ``unknown_index`` ("random", "extra", or a fixed integer).

    NOTE(review): ``__init__`` parameters are collapsed to ``_snake_case``
    and assignment targets to ``UpperCAmelCase`` by automated renaming; the
    body reads names (``n_e``, ``vq_embed_dim``, ``beta``, ...) the
    signature no longer declares.  Kept byte-for-byte.
    """

    def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case=None , _snake_case="random" , _snake_case=False , _snake_case=True ) -> Optional[int]:
        """Build the codebook embedding and, if requested, the remap buffer."""
        super().__init__()
        UpperCAmelCase = n_e
        UpperCAmelCase = vq_embed_dim
        UpperCAmelCase = beta
        UpperCAmelCase = legacy
        UpperCAmelCase = nn.Embedding(self.n_e , self.vq_embed_dim )
        self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
        UpperCAmelCase = remap
        if self.remap is not None:
            self.register_buffer('''used''' , torch.tensor(np.load(self.remap ) ) )
            UpperCAmelCase = self.used.shape[0]
            UpperCAmelCase = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                UpperCAmelCase = self.re_embed
                UpperCAmelCase = self.re_embed + 1
            print(
                f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
                f"""Using {self.unknown_index} for unknown indices.""" )
        else:
            UpperCAmelCase = n_e
        UpperCAmelCase = sane_index_shape

    def snake_case_ ( self , _snake_case ) -> List[str]:
        """Map raw codebook indices into the remapped ('used') index space."""
        UpperCAmelCase = inds.shape
        assert len(_snake_case ) > 1
        UpperCAmelCase = inds.reshape(ishape[0] , -1 )
        UpperCAmelCase = self.used.to(_snake_case )
        UpperCAmelCase = (inds[:, :, None] == used[None, None, ...]).long()
        UpperCAmelCase = match.argmax(-1 )
        UpperCAmelCase = match.sum(2 ) < 1
        if self.unknown_index == "random":
            # Unmatched indices get a random valid remapped index.
            UpperCAmelCase = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
        else:
            UpperCAmelCase = self.unknown_index
        return new.reshape(_snake_case )

    def snake_case_ ( self , _snake_case ) -> int:
        """Inverse of the remapping: translate remapped indices back to raw ones."""
        UpperCAmelCase = inds.shape
        assert len(_snake_case ) > 1
        UpperCAmelCase = inds.reshape(ishape[0] , -1 )
        UpperCAmelCase = self.used.to(_snake_case )
        if self.re_embed > self.used.shape[0]:  # extra token
            UpperCAmelCase = 0  # simply set to zero
        UpperCAmelCase = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , _snake_case )
        return back.reshape(_snake_case )

    def snake_case_ ( self , _snake_case ) -> List[str]:
        """Quantize ``z``; returns (z_q, loss, (perplexity, encodings, indices))."""
        # reshape z -> (batch, height, width, channel) and flatten
        UpperCAmelCase = z.permute(0 , 2 , 3 , 1 ).contiguous()
        UpperCAmelCase = z.view(-1 , self.vq_embed_dim )
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        UpperCAmelCase = torch.argmin(torch.cdist(_snake_case , self.embedding.weight ) , dim=1 )
        UpperCAmelCase = self.embedding(_snake_case ).view(z.shape )
        UpperCAmelCase = None
        UpperCAmelCase = None
        # compute loss for embedding
        if not self.legacy:
            UpperCAmelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
        else:
            UpperCAmelCase = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
        # preserve gradients
        UpperCAmelCase = z + (z_q - z).detach()
        # reshape back to match original input shape
        UpperCAmelCase = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        if self.remap is not None:
            UpperCAmelCase = min_encoding_indices.reshape(z.shape[0] , -1 )  # add batch axis
            UpperCAmelCase = self.remap_to_used(_snake_case )
            UpperCAmelCase = min_encoding_indices.reshape(-1 , 1 )  # flatten
        if self.sane_index_shape:
            UpperCAmelCase = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def snake_case_ ( self , _snake_case , _snake_case ) -> str:
        """Look up codebook entries for given indices and restore spatial layout."""
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            UpperCAmelCase = indices.reshape(shape[0] , -1 )  # add batch axis
            UpperCAmelCase = self.unmap_to_all(_snake_case )
            UpperCAmelCase = indices.reshape(-1 )  # flatten again
        # get quantized latent vectors
        UpperCAmelCase = self.embedding(_snake_case )
        if shape is not None:
            UpperCAmelCase = z_q.view(_snake_case )
            # reshape back to match original input shape
            UpperCAmelCase = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        return z_q
class lowercase ( A__ ):
    """Diagonal Gaussian over latents, parameterised by concatenated (mean, logvar).

    ``parameters`` is split in half along dim 1 into mean and log-variance;
    the log-variance is clamped to [-30, 20] for numerical stability.  With
    ``deterministic=True`` the variance is zeroed so ``sample`` returns the
    mean and KL/NLL collapse to zero.
    """

    def __init__( self , _snake_case , _snake_case=False ) -> Tuple:
        """Split parameters into mean/logvar and precompute std and var."""
        UpperCAmelCase = parameters
        UpperCAmelCase , UpperCAmelCase = torch.chunk(_snake_case , 2 , dim=1 )
        UpperCAmelCase = torch.clamp(self.logvar , -30.0 , 20.0 )
        UpperCAmelCase = deterministic
        UpperCAmelCase = torch.exp(0.5 * self.logvar )
        UpperCAmelCase = torch.exp(self.logvar )
        if self.deterministic:
            # Zero variance makes sampling return the mean exactly.
            UpperCAmelCase = UpperCAmelCase = torch.zeros_like(
                self.mean , device=self.parameters.device , dtype=self.parameters.dtype )

    def snake_case_ ( self , _snake_case = None ) -> torch.FloatTensor:
        """Draw a reparameterised sample: mean + std * eps."""
        # make sure sample is on the same device as the parameters and has same dtype
        UpperCAmelCase = randn_tensor(
            self.mean.shape , generator=_snake_case , device=self.parameters.device , dtype=self.parameters.dtype )
        UpperCAmelCase = self.mean + self.std * sample
        return x

    def snake_case_ ( self , _snake_case=None ) -> Optional[int]:
        """KL divergence to a standard normal, or to another diagonal Gaussian."""
        if self.deterministic:
            return torch.Tensor([0.0] )
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean , 2 ) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar , dim=[1, 2, 3] , )

    def snake_case_ ( self , _snake_case , _snake_case=[1, 2, 3] ) -> Optional[int]:
        """Negative log-likelihood of ``sample`` under this Gaussian, summed over dims."""
        if self.deterministic:
            return torch.Tensor([0.0] )
        UpperCAmelCase = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=_snake_case )

    def snake_case_ ( self ) -> int:
        """Return the distribution's mode (its mean)."""
        return self.mean
| 254
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowercase :
    """Test helper that builds small LayoutLMv3 configs/inputs and runs shape checks.

    LayoutLMv3's sequence length is ``text_seq_length + image_seq_length``
    where the image contributes ``(image_size // patch_size) ** 2 + 1``
    tokens (patches plus CLS).  The ``create_and_check_*`` methods
    instantiate a head, run a forward pass and assert output shapes.
    """

    def __init__( self , _snake_case , _snake_case=2 , _snake_case=3 , _snake_case=4 , _snake_case=2 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=36 , _snake_case=3 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=512 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=6 , _snake_case=6 , _snake_case=3 , _snake_case=4 , _snake_case=None , _snake_case=1000 , ) -> Any:
        """Store test hyperparameters and derive the combined sequence length.

        NOTE(review): parameters are collapsed to ``_snake_case`` and targets
        to ``UpperCAmelCase`` by automated renaming; the body reads the real
        names (``batch_size``, ``vocab_size``, ...).  Kept byte-for-byte.
        """
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = num_channels
        UpperCAmelCase = image_size
        UpperCAmelCase = patch_size
        UpperCAmelCase = text_seq_length
        UpperCAmelCase = is_training
        UpperCAmelCase = use_input_mask
        UpperCAmelCase = use_token_type_ids
        UpperCAmelCase = use_labels
        UpperCAmelCase = vocab_size
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = intermediate_size
        UpperCAmelCase = hidden_act
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = max_position_embeddings
        UpperCAmelCase = type_vocab_size
        UpperCAmelCase = type_sequence_label_size
        UpperCAmelCase = initializer_range
        UpperCAmelCase = coordinate_size
        UpperCAmelCase = shape_size
        UpperCAmelCase = num_labels
        UpperCAmelCase = num_choices
        UpperCAmelCase = scope
        UpperCAmelCase = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        UpperCAmelCase = text_seq_length
        UpperCAmelCase = (image_size // patch_size) ** 2 + 1
        UpperCAmelCase = self.text_seq_length + self.image_seq_length

    def snake_case_ ( self ) -> Any:
        """Create a config plus random input tensors (ids, bboxes, pixels, masks, labels)."""
        UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    # Swap so y1 <= y2.
                    UpperCAmelCase = bbox[i, j, 3]
                    UpperCAmelCase = bbox[i, j, 1]
                    UpperCAmelCase = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    # Swap so x1 <= x2.
                    UpperCAmelCase = bbox[i, j, 2]
                    UpperCAmelCase = bbox[i, j, 0]
                    UpperCAmelCase = t
        UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase = None
        if self.use_input_mask:
            UpperCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
        UpperCAmelCase = None
        if self.use_token_type_ids:
            UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        UpperCAmelCase = None
        UpperCAmelCase = None
        if self.use_labels:
            UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        UpperCAmelCase = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def snake_case_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> Any:
        """Run the base model with text+image, text-only and image-only inputs and check shapes."""
        UpperCAmelCase = LayoutLMvaModel(config=_snake_case )
        model.to(_snake_case )
        model.eval()
        # text + image
        UpperCAmelCase = model(_snake_case , pixel_values=_snake_case )
        UpperCAmelCase = model(
            _snake_case , bbox=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
        UpperCAmelCase = model(_snake_case , bbox=_snake_case , pixel_values=_snake_case , token_type_ids=_snake_case )
        UpperCAmelCase = model(_snake_case , bbox=_snake_case , pixel_values=_snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        UpperCAmelCase = model(_snake_case )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        UpperCAmelCase = model(pixel_values=_snake_case )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )

    def snake_case_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> str:
        """Sequence-classification head: logits must be (batch, num_labels)."""
        UpperCAmelCase = self.num_labels
        UpperCAmelCase = LayoutLMvaForSequenceClassification(_snake_case )
        model.to(_snake_case )
        model.eval()
        UpperCAmelCase = model(
            _snake_case , bbox=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def snake_case_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> str:
        """Token-classification head: logits must be (batch, text_seq_length, num_labels)."""
        UpperCAmelCase = self.num_labels
        UpperCAmelCase = LayoutLMvaForTokenClassification(config=_snake_case )
        model.to(_snake_case )
        model.eval()
        UpperCAmelCase = model(
            _snake_case , bbox=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )

    def snake_case_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> str:
        """QA head: start/end logits must each be (batch, seq_length)."""
        UpperCAmelCase = LayoutLMvaForQuestionAnswering(config=_snake_case )
        model.to(_snake_case )
        model.eval()
        UpperCAmelCase = model(
            _snake_case , bbox=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , start_positions=_snake_case , end_positions=_snake_case , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def snake_case_ ( self ) -> str:
        """Repackage prepare_config_and_inputs into the (config, inputs_dict) pair used by common tests."""
        UpperCAmelCase = self.prepare_config_and_inputs()
        (
            (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) ,
        ) = config_and_inputs
        UpperCAmelCase = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''pixel_values''': pixel_values,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def snake_case_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> Optional[Any]:
"""simple docstring"""
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def snake_case_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = LayoutLMvaModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def snake_case_ ( self , _snake_case , _snake_case , _snake_case=False ) -> str:
"""simple docstring"""
UpperCAmelCase = copy.deepcopy(_snake_case )
if model_class in get_values(_snake_case ):
UpperCAmelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(_snake_case , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_snake_case ):
UpperCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=_snake_case )
elif model_class in get_values(_snake_case ):
UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_snake_case )
UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_snake_case )
elif model_class in [
*get_values(_snake_case ),
]:
UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_snake_case )
elif model_class in [
*get_values(_snake_case ),
]:
UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=_snake_case , )
return inputs_dict
def snake_case_ ( self ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase = type
self.model_tester.create_and_check_model(*_snake_case )
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_snake_case )
def snake_case_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_snake_case )
@slow
def snake_case_ ( self ) -> Any:
"""simple docstring"""
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = LayoutLMvaModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def _lowerCAmelCase ( ):
    """Load the COCO cats fixture image used by the integration test below."""
    UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
class lowercase ( unittest.TestCase ):
    """Integration test: forward pass of the published microsoft/layoutlmv3-base checkpoint."""

    @cached_property
    def snake_case_ ( self ) -> Tuple:
        """Image processor fixture (OCR disabled so boxes/ids are supplied manually)."""
        return LayoutLMvaImageProcessor(apply_ocr=_snake_case ) if is_vision_available() else None

    @slow
    def snake_case_ ( self ) -> Optional[int]:
        """Check output shape and the first 3x3 hidden-state slice against recorded values."""
        UpperCAmelCase = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(_snake_case )
        UpperCAmelCase = self.default_image_processor
        UpperCAmelCase = prepare_img()
        UpperCAmelCase = image_processor(images=_snake_case , return_tensors='''pt''' ).pixel_values.to(_snake_case )
        UpperCAmelCase = torch.tensor([[1, 2]] )
        UpperCAmelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        UpperCAmelCase = model(
            input_ids=input_ids.to(_snake_case ) , bbox=bbox.to(_snake_case ) , pixel_values=pixel_values.to(_snake_case ) , )
        # verify the logits
        UpperCAmelCase = torch.Size((1, 199, 768) )
        self.assertEqual(outputs.last_hidden_state.shape , _snake_case )
        UpperCAmelCase = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(_snake_case )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , _snake_case , atol=1e-4 ) )
| 254
| 1
|
def a__(text1, text2):
    """Return the longest common substring of ``text1`` and ``text2``.

    Classic O(len(text1) * len(text2)) dynamic programming: dp[i][j] holds the
    length of the common suffix of text1[:i] and text2[:j].

    Raises:
        ValueError: if either argument is not a string.
    """
    # The original signature declared the same parameter name twice, which is
    # a SyntaxError; the two inputs are now named distinctly.
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("""longest_common_substring() takes two strings for inputs""")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0  # end position (exclusive) of the best substring in text1
    ans_length = 0
    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
# Run the module's embedded doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 76
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger. NOTE(review): the constant below rebinds the same mangled
# name `__lowerCAmelCase`, clobbering the logger — confirm intent upstream.
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
# Map of supported LiLT checkpoints to their hosted config files.
__lowerCAmelCase : Tuple = {
    'SCUT-DLVCLab/lilt-roberta-en-base': (
        'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
    ),
}
class UpperCAmelCase_(_A):
    """Configuration class for the LiLT model.

    Stores the hyper-parameters used to instantiate the model; defaults match
    the SCUT-DLVCLab/lilt-roberta-en-base checkpoint.
    """

    # Model identifier used by the Hugging Face config machinery.
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        """Create a LiLT configuration.

        The original (mangled) signature repeated one parameter name for every
        argument, which is a SyntaxError; names are restored from the values
        the body assigns. Defaults are unchanged.
        """
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 76
| 1
|
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __snake_case (unittest.TestCase ):
    """Unit tests for the summarization preprocessing helpers
    (truncate_or_pad, process_story, build_mask, compute_token_type_ids).
    """

    # NOTE(review): `_UpperCAmelCase` is undefined in this file — each use
    # appears to be a mangled reference to the local values computed just
    # above it; confirm against the original test module.
    def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
        """Fixture: block size shared by the truncate_or_pad tests."""
        _lowerCAmelCase : Tuple = 10
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
        """Short sequences are right-padded with the pad value to block_size."""
        _lowerCAmelCase : List[str] = [1, 2, 3, 4]
        _lowerCAmelCase : Dict = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
        """Sequences of exactly block_size are returned unchanged."""
        _lowerCAmelCase : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        _lowerCAmelCase : Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
        """Sequences longer than block_size are truncated."""
        _lowerCAmelCase : Tuple = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        _lowerCAmelCase : Union[str, Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
        """A story with no @highlight marker yields no summary lines."""
        _lowerCAmelCase : int = """It was the year of Our Lord one thousand seven hundred and
 seventy-five.\n\nSpiritual revelations were conceded to England at that
 favoured period, as at this."""
        _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = process_story(_UpperCAmelCase )
        self.assertEqual(_UpperCAmelCase , [] )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
        """An empty story yields empty story and summary line lists."""
        _lowerCAmelCase : Optional[int] = """"""
        _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = process_story(_UpperCAmelCase )
        self.assertEqual(_UpperCAmelCase , [] )
        self.assertEqual(_UpperCAmelCase , [] )
    def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
        """Text after @highlight is split out as the summary."""
        _lowerCAmelCase : Union[str, Any] = (
            """It was the year of Our Lord one thousand seven hundred and """
            """seventy-five\n\nSpiritual revelations were conceded to England """
            """at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
        )
        _lowerCAmelCase , _lowerCAmelCase : str = process_story(_UpperCAmelCase )
        _lowerCAmelCase : Union[str, Any] = [
            """It was the year of Our Lord one thousand seven hundred and seventy-five.""",
            """Spiritual revelations were conceded to England at that favoured period, as at this.""",
        ]
        self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
        _lowerCAmelCase : Tuple = ["""It was the best of times."""]
        self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
        """build_mask marks every non-pad position with 1 (no pad present)."""
        _lowerCAmelCase : str = torch.tensor([1, 2, 3, 4] )
        _lowerCAmelCase : Optional[int] = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 0 ).numpy() , expected.numpy() )
    def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
        """build_mask zeroes out positions equal to the pad id (23 here)."""
        _lowerCAmelCase : Dict = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
        _lowerCAmelCase : int = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 23 ).numpy() , expected.numpy() )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
        """build_mask works when the pad id (1) also appears mid-sequence."""
        _lowerCAmelCase : List[str] = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        _lowerCAmelCase : Optional[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 1 ).numpy() , expected.numpy() )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
        """compute_token_type_ids alternates segments at each separator (101)."""
        _lowerCAmelCase : str = 101
        _lowerCAmelCase : Union[str, Any] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
        _lowerCAmelCase : Optional[int] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
        _lowerCAmelCase : List[Any] = compute_token_type_ids(_UpperCAmelCase , _UpperCAmelCase )
        np.testing.assert_array_equal(_UpperCAmelCase , _UpperCAmelCase )
| 429
|
# Using DFS for finding an Eulerian path traversal.
def dfs(u, graph, visited_edge, path=None):
    """Walk unused edges depth-first from ``u``, appending visited vertices.

    ``visited_edge`` is a symmetric boolean matrix marking consumed edges.
    Returns the accumulated path (Hierholzer-style traversal).
    """
    # The original file defined all three helpers under one mangled name while
    # their bodies called `dfs`/`check_circuit_or_path`/`check_euler`; the
    # intended names are restored so those calls resolve.
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            # Mark the edge in both directions (undirected graph).
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """Classify the graph: 1 = Euler circuit, 2 = Euler path, 3 = neither.

    Also returns the last odd-degree node seen (-1 if none); for an Euler
    path that node is a valid starting vertex.
    """
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    """Print whether ``graph`` has an Euler cycle/path and one such traversal."""
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node  # an Euler path must start at an odd-degree node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    """Run check_euler over a handful of sample graphs."""
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
| 429
| 1
|
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
# Linear-scan cutoff: once a search window has fewer than this many candidates
# the ternary steps stop and a plain linear scan finishes the job. It is
# recommended to keep this number greater than or equal to 10.
precision = 10

# Backwards-compatible alias for the constant's previous (mangled) name.
UpperCAmelCase = precision


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linearly scan array[left..right] (inclusive) for target; -1 if absent."""
    # The original scanned range(left, right), silently skipping the last
    # candidate; the bounds are inclusive throughout this module now.
    for i in range(left, right + 1):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted list; returns index or -1."""
    left = 0
    right = len(array) - 1  # inclusive bounds
    while left <= right:
        if right - left < precision:
            # Few enough candidates remain: finish with a linear scan.
            return lin_search(left, right, array, target)
        # Split the remaining window into three near-equal parts. The
        # original computed (left + right) // 3, which is only correct when
        # left == 0; the probes are now offset from the current window.
        one_third = left + (right - left) // 3 + 1
        two_third = right - (right - left) // 3 - 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over array[left..right] (inclusive)."""
    if left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = left + (right - left) // 3 + 1
        two_third = right - (right - left) // 3 - 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    return -1


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"Iterative search: {target} found at positions: {result_ite}")
        print(f"Recursive search: {target} found at positions: {result_rec}")
    else:
        print("Not found")
| 475
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output of the SDE-VE scheduler's predictor step.

    The original declaration repeated one mangled field name twice (so only a
    single field survived) and never matched the `SdeVeOutput(...)` call site
    below; the class/field names are restored to what that call site uses.
    """

    # Sample at the previous timestep (mean plus diffusion noise).
    prev_sample: torch.FloatTensor
    # Mean of the previous sample, before the noise term is added.
    prev_sample_mean: torch.FloatTensor
class __magic_name__(SchedulerMixin, ConfigMixin):
    """Variance-exploding (VE) SDE scheduler, strongly influenced by
    https://github.com/yang-song/score_sde_pytorch.

    `step_pred` performs the predictor (reverse-SDE) update and `step_correct`
    the Langevin corrector update. The original listed the same mangled base
    class twice (a TypeError) and gave all methods one clobbering name while
    bodies called `self.set_sigmas` / `self.set_timesteps` /
    `self.get_adjacent_sigma`; the intended names are restored.
    """

    # Number of model calls consumed per scheduler step.
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        """Store the config and precompute the sigma schedule.

        Args:
            num_train_timesteps: diffusion steps used at training time.
            snr: signal-to-noise weighting for the corrector step size.
            sigma_min / sigma_max: bounds of the geometric sigma schedule.
            sampling_eps: end value of the continuous time grid.
            correct_steps: corrector iterations per predictor step.
        """
        # Standard deviation of the initial noise distribution.
        self.init_noise_sigma = sigma_max
        # Setable values; populated by set_timesteps/set_sigmas.
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """This scheduler needs no input scaling; returns ``sample`` unchanged."""
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None):
        """Build the continuous time grid from 1 down to ``sampling_eps``."""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        """Precompute the geometric noise levels matching the time grid."""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        """Sigma of the previous discrete step (zero at the very first step)."""
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        """Predictor step: propagate the sample backwards through the SDE."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        """Corrector step: Langevin update of the sample toward the score."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ):
        """Diffuse ``original_samples`` to the noise level of ``timesteps``."""
        # Make sure sigmas and timesteps live on the samples' device.
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 475
| 1
|
import math
def _A ( lowerCamelCase , lowerCamelCase ):
if 0 not in (x, y):
# We use the relation x^y = y*log10(x), where 10 is the base.
return y * math.logaa(lowerCamelCase__ )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("This should never happen" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
SCREAMING_SNAKE_CASE__ : str = "Enter the base and the power separated by a comma: "
SCREAMING_SNAKE_CASE__ : List[str] = map(int, input(prompt).split(""","""))
SCREAMING_SNAKE_CASE__ : Union[str, Any] = map(int, input(prompt).split(""","""))
# We find the log of each number, using the function res(), which takes two
# arguments.
SCREAMING_SNAKE_CASE__ : str = res(xa, ya)
SCREAMING_SNAKE_CASE__ : str = res(xa, ya)
# We check for the largest number
if resa > resa:
print("""Largest number is""", xa, """^""", ya)
elif resa > resa:
print("""Largest number is""", xa, """^""", ya)
else:
print("""Both are equal""")
| 112
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger. NOTE(review): the constant below rebinds the same mangled
# name `__snake_case`, clobbering the logger — confirm intent upstream.
__snake_case : Optional[int] = logging.get_logger(__name__)
# Map of supported Transformer-XL checkpoints to their hosted config files.
__snake_case : int = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class A(a):
    """Configuration class for the Transformer-XL model.

    The original declared three class attributes under one clobbering mangled
    name and a property whose setter decorator referenced an undefined name;
    the canonical attribute/property names are restored. The `__init__`
    signature repeated one parameter name for every argument (a SyntaxError);
    names are restored from the values the body assigns. Defaults unchanged.
    """

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    # Maps generic config attribute names onto this model's native names.
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],  # noqa: B006 — read-only default, kept for back-compat
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Copy the cutoffs so the (mutable) default list is never aliased.
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
| 131
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
# Module logger for the REALM tokenization module.
a_ = logging.get_logger(__name__)
# The four constants below were all bound to one clobbering mangled name
# (`a_`) although the tokenizer class later reads `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP`, `PRETRAINED_INIT_CONFIGURATION` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`; the intended names are restored.
# Two malformed URLs ("/aresolve/", "tokenizer.jsont") are also fixed.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Checkpoint name -> hosted file URL for the slow (vocab) and fast (json)
# tokenizer artifacts.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt"
        ),
        "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
        "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-openqa": (
            "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-reader": (
            "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-openqa": (
            "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-reader": (
            "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input length (in tokens) supported by each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}

# Per-checkpoint tokenizer init overrides.
PRETRAINED_INIT_CONFIGURATION = {
    "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-reader": {"do_lower_case": True},
    "google/realm-orqa-wq-openqa": {"do_lower_case": True},
    "google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class UpperCAmelCase__(lowercase_):
    """Fast (Rust-backed) WordPiece tokenizer for REALM.

    The original methods declared duplicate parameter names (a SyntaxError)
    and were all defined under one clobbering mangled name; parameter and
    method names are restored from the values the bodies read and from the
    attributes the class references. Behavior is otherwise unchanged.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer if its serialized options disagree
        # with the options requested here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        """Encode a batch of candidate sequences, each padded to max length.

        Accepts optional `text_pair` and `return_tensors` through kwargs and
        returns a BatchEncoding whose values are lists of per-candidate
        encodings (empty fields are dropped).
        """
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)
        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)
            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] or [CLS] A [SEP] B [SEP] (truthiness check kept as-is)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for the first sequence (with specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Persist the backend model's vocabulary files; returns their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 713
|
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__(metaclass=DummyObject):
    """Placeholder emitted when optional backends are missing: any use raises
    an informative ImportError via ``requires_backends``.

    The original signatures declared ``*args`` and ``**kwargs`` under the same
    mangled name (a SyntaxError), and the class attribute/metaclass did not
    use the names the imported ``DummyObject`` machinery expects; fixed here.
    """

    # DummyObject inspects `_backends` when building its error message.
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 286
| 0
|
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's CLI: core count, training script, and the rest.

    The original defined both functions under one clobbering name while the
    bodies called `parse_args()` / `main()`; the intended names are restored.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    """Import the training script as a module and spawn it on the TPU cores."""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the training script sees its own arguments; the
    # original assigned this list to a throwaway variable.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
| 499
|
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class lowerCamelCase ( _UpperCAmelCase ):
    def a_ ( self ):
        # Plain Python ints with no declared type are inferred as Arrow int64.
        # NOTE(review): `arr` / `pa.intaa` look like mangled spellings of the
        # assigned variable and `pa.int64` — confirm against the original test.
        UpperCamelCase : int = pa.array(TypedSequence([1, 2, 3] ) )
        self.assertEqual(arr.type , pa.intaa() )
    def a_ ( self ):
        # Passing a raw pyarrow type next to a TypedSequence must be rejected.
        # NOTE(review): `SCREAMING_SNAKE_CASE_` is undefined — mangled
        # reference to the expected exception type; confirm upstream.
        with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
            UpperCamelCase : int = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
    def a_ ( self ):
        # Supplying both try_type and type together must be rejected.
        with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
            UpperCamelCase : Tuple = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def a_ ( self ):
UpperCamelCase : List[Any] = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def a_ ( self ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
UpperCamelCase : List[str] = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def a_ ( self ):
UpperCamelCase : str = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def a_ ( self ):
UpperCamelCase : int = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def a_ ( self ):
UpperCamelCase : Tuple = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def a_ ( self ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
UpperCamelCase : Tuple = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def a_ ( self ):
UpperCamelCase : str = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def a_ ( self ):
UpperCamelCase : Optional[int] = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def a_ ( self ):
import PIL.Image
UpperCamelCase : Any = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=SCREAMING_SNAKE_CASE_ ) as mock_cast_to_python_objects:
UpperCamelCase : Union[str, Any] = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image() ) )
UpperCamelCase , UpperCamelCase : Tuple = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("""optimize_list_casting""" , SCREAMING_SNAKE_CASE_ )
self.assertFalse(kwargs["""optimize_list_casting"""] )
def _check_output(output, expected_num_chunks):
    """Read an Arrow stream back and verify chunk count and record content.

    Fixes: the two parameters shared one name (a SyntaxError) and the function was
    named `A_` while every call site uses `_check_output`.
    """
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    """ArrowWriter.write produces a readable stream with the expected schema and chunking."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_with_features():
    """Schema and metadata written by ArrowWriter round-trip through `Features`."""
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_datatype(writer_batch_size):
    """A non-hashable key raises InvalidKeyError when duplicate checking is on."""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    """Writing two rows under the same key raises DuplicatedKeysError."""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    """Distinct keys pass duplicate checking and the stream reads back normally."""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_batch(fields, writer_batch_size):
    """`write_batch` accepts column dicts, including an empty batch."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        writer.write_batch({"col_1": [], "col_2": []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_table(fields, writer_batch_size):
    """`write_table` accepts a pre-built pyarrow Table."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_row(fields, writer_batch_size):
    """`write_row` accepts single-row pyarrow Tables."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_file():
    """ArrowWriter writes a readable .arrow file when given a path instead of a stream."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)
def get_base_dtype(arr_type):
    """Recursively unwrap pyarrow list types down to the innermost primitive dtype.

    Fixes: the parameter was named `snake_case_` while the body read `arr_type`, and the
    function was named `A_` while call sites use `get_base_dtype`.
    """
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type
def change_first_primitive_element_in_list(lst, value):
    """Replace the first primitive (non-list) element of an arbitrarily nested list, in place.

    Fixes: duplicate parameter names (SyntaxError), `isinstance` checked against an
    undefined name instead of `list`, and the recursive call site expected this name.
    """
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type(sequence, optimized_int_type, expected_dtype):
    """`optimized_int_type` narrows the inferred integer dtype."""
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype
@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    """Known columns get narrowed dtypes, with a fallback to int64 when values overflow."""
    # in range
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype
    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    """The writer's stream is closed on normal exit and when an exception escapes the block."""
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed
def test_arrow_writer_with_filesystem(mockfs):
    """ArrowWriter resolves fsspec URLs and writes through the mock filesystem fixture."""
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)
def test_parquet_writer_write():
    """ParquetWriter output is readable back via pyarrow.parquet."""
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_parquet_writer_write_embed_local_files(tmp_path, embed_local_files):
    """When `embed_local_files` is set, image bytes are inlined instead of referenced by path."""
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def test_always_nullable():
    """ArrowWriter drops `nullable=False` flags: every written field becomes nullable."""
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
| 499
| 1
|
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
__A : Optional[Any] = [
'good first issue',
'feature request',
'wip',
]
def lowerCAmelCase_ ( ):
    """Close or mark as stale inactive issues on huggingface/accelerate.

    Fixes: the sort key was `lambda a: i.created_at` (references an undefined name) and
    `reverse=a` used an undefined flag — comments are meant to be sorted newest-first.
    """
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/accelerate')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda c: c.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state='closed')
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.')
if __name__ == "__main__":
    # Fix: `main` is undefined in this module; the entry point defined above is `lowerCAmelCase_`.
    lowerCAmelCase_()
| 126
|
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__A : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class _UpperCamelCase ( nn.Module ):
    """ResNet backbone that encodes an image into `num_image_embeds` pooled 2048-d vectors."""

    def __init__( self , _a ):
        """`_a` is the argparse namespace; only `num_image_embeds` is read here."""
        super().__init__()
        # Fixes: `torchvision.models.resnetaaa` / `nn.AdaptiveAvgPoolad` do not exist
        # (garbled resnet152 / AdaptiveAvgPool2d), and the submodules were assigned to
        # throwaway locals instead of `self`, so the forward pass could never run.
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[_a.num_image_embeds])

    def lowercase__ ( self , _a ):
        """Forward pass: Bx3x224x224 -> BxNx2048."""
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(_a))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class _UpperCamelCase ( _A ):
    """Dataset of (text, image, multi-hot label) rows read from a JSON-lines file.

    Fixes: the five `__init__` parameters shared one name (a SyntaxError) and every
    attribute assignment went to a throwaway local instead of `self`.
    """

    def __init__( self , data_path , tokenizer , transforms , labels , max_seq_length ):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__( self ):
        return len(self.data)

    def __getitem__( self , index ):
        # Encode the text; keep the special tokens at each end as image boundary markers.
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        # Multi-hot label vector over the known label set.
        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]['label']]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]['img'])).convert('RGB')
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def lowercase__ ( self ):
        """Return a Counter of label frequencies across the dataset."""
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row['label'])
        return label_freqs
def lowerCAmelCase_ ( batch ):
    """Collate dataset rows into padded text/mask tensors plus stacked image/label tensors.

    Fixes: every intermediate was assigned to one throwaway local, so the padded
    tensors were never filled and most names in the body were undefined.
    """
    lens = [len(row['sentence']) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row['sentence']
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row['image'] for row in batch])
    tgt_tensor = torch.stack([row['label'] for row in batch])
    img_start_token = torch.stack([row['image_start_token'] for row in batch])
    img_end_token = torch.stack([row['image_end_token'] for row in batch])
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def lowerCAmelCase_ ( ):
    """Return the fixed, ordered list of MM-IMDB genre labels."""
    genres = (
        "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary",
        "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi",
        "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation",
        "Biography", "Film-Noir",
    )
    return list(genres)
def lowerCAmelCase_ ( ):
    """Standard eval-time image pipeline: resize, center-crop to 224, tensorize, normalize."""
    normalize = transforms.Normalize(
        mean=[0.46_777_044, 0.44_531_429, 0.40_661_017],
        std=[0.12_221_994, 0.12_145_835, 0.14_380_469],
    )
    pipeline = [
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ]
    return transforms.Compose(pipeline)
| 126
| 1
|
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
UpperCamelCase__ = pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    """`inspect_dataset` copies the dataset script into the target directory."""
    # Fixes: the two parameters shared one name (a SyntaxError) and the body used
    # the undefined name `__A` instead of the parameters.
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    """`inspect_metric` copies the metric script into the target directory."""
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    """Config info carries the requested config name and its split names."""
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    """A missing config name for a multi-config dataset raises."""
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    """The expected config name is among those discovered for the dataset."""
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    """`get_dataset_infos` maps every config to its info; the first config has the expected splits."""
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    """The info for the expected config exposes the expected split names."""
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    """A missing config name for a multi-config dataset raises."""
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 620
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
__lowercase : List[str] = logging.get_logger(__name__)
class _A ( PretrainedConfig ):
    """Configuration for a composite vision-encoder / text-decoder model.

    Fixes: the base class `snake_case` was undefined (PretrainedConfig is imported above),
    both class attributes shared the mangled name `__lowerCamelCase` (so `self.model_type`
    read in __init__ was broken), the sub-config results were assigned to one throwaway
    local, and all methods shared one name so only the last survived.
    """

    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__( self , **kwargs ):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                F"""A configuraton of type {self.model_type} cannot be instantiated because """
                F"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs( cls , encoder_config , decoder_config , **kwargs ):
        """Build the composite config from two sub-configs, enabling decoder cross-attention."""
        logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict( self ):
        """Serialize, expanding the nested encoder/decoder configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class _A ( OnnxConfig ):
    """ONNX export config for the vision encoder: pixel inputs, hidden-state outputs.

    Fixes: the base class `snake_case` was undefined (OnnxConfig is imported above), the
    class attribute had a mangled name, and all three properties shared one name so two
    of them were unreachable.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs( self ):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation( self ):
        # Looser tolerance than the default for vision backbones.
        return 1E-4

    @property
    def outputs( self ):
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class _A ( OnnxConfig ):
    """ONNX export config for the text decoder consuming encoder hidden states.

    Fixes: the base class `snake_case` was undefined, both members shared one name, and
    the dynamic-axis/dummy-input results were assigned to one throwaway local so the
    returned mappings were empty or wrong.
    """

    @property
    def inputs( self ):
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None ):
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        batch, encoder_sequence = dummy_input["input_ids"].shape
        # The decoder also needs dummy encoder hidden states of matching batch/sequence size.
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs
class _A ( OnnxConfig ):
    """Dispatcher ONNX config exposing per-sub-model encoder/decoder configs.

    Fixes: undefined base class, all members sharing one name, and the
    `encoder_hidden_size` result being assigned to a throwaway local instead of
    the decoder config.
    """

    @property
    def inputs( self ):
        # The composite model has no single input signature; sub-configs define their own.
        pass

    def get_encoder_config( self , encoder_config ):
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config( self , encoder_config , decoder_config , feature = "default" ):
        # The decoder must know the encoder's hidden size to shape cross-attention inputs.
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
| 36
| 0
|
"""simple docstring"""
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
__A = logging.getLogger(__name__)
__A = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
__A = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
@dataclass
class __lowerCAmelCase :
    """Arguments for which model/config/tokenizer to fine-tune or train from scratch.

    Fixes: every field shared the mangled name `__magic_name__` (so all but the last
    collided), defaults referenced the undefined `_UpperCamelCase`, and the validation
    method was not named `__post_init__` so it never ran.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(__A)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        # config_overrides only makes sense when training from scratch.
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path" )
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__magic_name__ : Optional[str] = field(
default=_UpperCamelCase , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""})
__magic_name__ : Optional[str] = field(
default=_UpperCamelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""})
__magic_name__ : Optional[str] = field(default=_UpperCamelCase , metadata={"""help""": """The input training data file (a text file)."""})
__magic_name__ : Optional[str] = field(
default=_UpperCamelCase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__magic_name__ : Optional[str] = field(
default=_UpperCamelCase , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , )
__magic_name__ : Optional[str] = field(
default=_UpperCamelCase , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , )
__magic_name__ : bool = field(
default=_UpperCamelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""})
__magic_name__ : Optional[int] = field(
default=5 , metadata={
"""help""": """The percentage of the train set used as validation set in case there's no validation split"""
} , )
__magic_name__ : Optional[int] = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated. Default to the max input length of the model."""
)
} , )
__magic_name__ : Optional[int] = field(
default=_UpperCamelCase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__magic_name__ : float = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""})
__magic_name__ : bool = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
def _UpperCAmelCase ( self : Union[str, Any] ):
if self.train_file is not None:
A__ : Tuple =self.train_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
A__ : List[str] =self.validation_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def lowercase ( dataset , ref_file ):
    """Attach whole-word-masking reference data to `dataset` as a `chinese_ref` column.

    `ref_file` holds one JSON-encoded reference entry per non-empty line, one per
    dataset row.

    Bug fixes: both parameters were declared with the same name (a SyntaxError in
    Python), and the column assignment target was lost — restored as `chinese_ref`,
    the key the whole-word-masking collator consumes (TODO confirm against caller).
    """
    with open(ref_file, "r", encoding="utf-8") as f:
        # Skip blank / whitespace-only lines; each remaining line is a JSON list.
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)  # exactly one reference entry per row

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def lowercase ( ):
    """Fine-tune (or train from scratch) a masked language model with whole-word masking.

    Parses ModelArguments/DataTrainingArguments/TrainingArguments, loads data either
    from the Hub or from local csv/json/txt files, tokenizes, optionally attaches
    Chinese whole-word-masking reference files, then trains/evaluates with `Trainer`.
    Returns the evaluation results dict (perplexity when `--do_eval`).

    Bug fixes: the tuple unpacking of the parsed argument dataclasses and every
    intermediate variable binding had been destroyed (all assigned to a throwaway
    name, so `training_args` etc. raised NameError); `training_args.fpaa` restored
    to the real `fp16` attribute.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            # Non-empty output dir without a resumable checkpoint: refuse to clobber silently.
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            # No validation split upstream: carve one out of the train split by percentage.
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"  # the datasets loader for plain text is named "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    # Keep embedding matrix in sync with the (possibly different) tokenizer vocab size.
    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f" {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f" {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results
def lowercase ( UpperCamelCase : Union[str, Any] ):
    """Multiprocessing entry point (e.g. TPU via ``xla_spawn``); the argument is the
    process index required by the spawner and is deliberately unused.

    NOTE(review): calls ``main()``, which is not defined under that name in this
    module — presumably lost to renaming; confirm the intended target.
    """
    main()
if __name__ == "__main__":
    # Standard script entry guard.
    # NOTE(review): `main` is not defined under that name in this module — likely a
    # renaming artifact; confirm the intended entry function.
    main()
| 707
|
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework string used for `return_tensors=` in the tests below,
# preferring torch, then tensorflow, then jax, based on what is installed.
if is_torch_available():
    __A : Optional[Any] = "pt"
elif is_tf_available():
    __A : Optional[Any] = "tf"
else:
    __A : Tuple = "jax"
class __lowerCAmelCase ( _UpperCamelCase , unittest.TestCase):
    """Test suite for the byte-level ByT5 tokenizer (ids are raw bytes plus specials).

    NOTE(review): obfuscation artifacts — the two `__magic_name__` attributes below were
    clearly distinct names originally (the second shadows the first), and many method
    bodies assign results to a throwaway `A__` while later lines read the original
    variable names (`tokenizer`, `toks`, `batch`, ...); confirm against the upstream
    test file before relying on these tests running.
    """
    # Tokenizer class under test / rust-tokenizer flag (names lost; see class note).
    __magic_name__ : List[Any] = ByTaTokenizer
    __magic_name__ : Optional[Any] = False
    def _UpperCAmelCase ( self : int ):
        # setUp: create a fresh ByT5 tokenizer and persist it for from_pretrained reloads.
        super().setUp()
        A__ : List[Any] =ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _UpperCAmelCase ( self : List[Any] ):
        # Pretrained reference tokenizer used by the behavioural tests below.
        return ByTaTokenizer.from_pretrained("google/byt5-small" )
    def _UpperCAmelCase ( self : List[Any] , **UpperCamelCase__ : Union[str, Any] ):
        # Reload a tokenizer from the directory written in setUp, forwarding kwargs.
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
    def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : List[Any]=20 , UpperCamelCase__ : Tuple=5 ):
        # Override of the common get_clean_sequence helper (see XXX note below).
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        A__ : int =[]
        for i in range(len(UpperCamelCase__ ) ):
            try:
                A__ : str =tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase__ )
            except UnicodeDecodeError:
                # Single byte was not valid UTF-8 on its own; skip it.
                pass
            toks.append((i, tok) )
        # Keep only ids that decode to plain ASCII words and round-trip through encode.
        A__ : Any =list(filter(lambda UpperCamelCase__ : re.match(R"^[ a-zA-Z]+$" , t[1] ) , UpperCamelCase__ ) )
        A__ : str =list(filter(lambda UpperCamelCase__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase__ ) , UpperCamelCase__ ) )
        if max_length is not None and len(UpperCamelCase__ ) > max_length:
            A__ : int =toks[:max_length]
        if min_length is not None and len(UpperCamelCase__ ) < min_length and len(UpperCamelCase__ ) > 0:
            while len(UpperCamelCase__ ) < min_length:
                A__ : List[Any] =toks + toks
        # toks_str = [t[1] for t in toks]
        A__ : str =[t[0] for t in toks]
        # Ensure consistency
        A__ : Optional[int] =tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
        if " " not in output_txt and len(UpperCamelCase__ ) > 1:
            A__ : List[Any] =(
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase__ )
                + " "
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase__ )
            )
        if with_prefix_space:
            A__ : List[Any] =" " + output_txt
        A__ : Tuple =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
        return output_txt, output_ids
    def _UpperCAmelCase ( self : Union[str, Any] ):
        # Explicit "</s>" in the text must tokenize the same as the auto-appended EOS.
        A__ : Any =self.ta_base_tokenizer
        A__ : str =tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] )
        A__ : Any =tokenizer(["hi", "I went to the gym", ""] )
        self.assertListEqual(batch_with_eos_added["input_ids"] , batch_without_eos_added["input_ids"] )
    def _UpperCAmelCase ( self : List[Any] ):
        # Multibyte (non-ASCII) characters round-trip through encode/decode.
        A__ : Optional[int] =self.ta_base_tokenizer
        A__ : Dict ="Unicode €."
        A__ : List[Any] =tokenizer(UpperCamelCase__ )
        A__ : List[str] =[88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"] , UpperCamelCase__ )
        # decoding
        A__ : Tuple =tokenizer.decode(UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , "Unicode €.</s>" )
        A__ : Tuple =tokenizer("e è é ê ë" )
        A__ : Tuple =[104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"] , UpperCamelCase__ )
        # decoding
        A__ : Union[str, Any] =tokenizer.decode(UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , "e è é ê ë</s>" )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "e è é ê ë</s>" )
    def _UpperCAmelCase ( self : Dict ):
        # Batched call with padding returns framework tensors of the expected shape/content.
        A__ : Dict =self.ta_base_tokenizer
        A__ : Any =["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        A__ : Any =[68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        A__ : Union[str, Any] =tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        if FRAMEWORK != "jax":
            A__ : str =list(batch.input_ids.numpy()[0] )
        else:
            A__ : int =list(batch.input_ids.tolist()[0] )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        self.assertEqual((2, 37) , batch.input_ids.shape )
        self.assertEqual((2, 37) , batch.attention_mask.shape )
    def _UpperCAmelCase ( self : Optional[Any] ):
        # Encoder-only call: no decoder_* keys should be produced.
        A__ : Any =self.ta_base_tokenizer
        A__ : List[Any] =["A long paragraph for summarization.", "Another paragraph for summarization."]
        A__ : Tuple =tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids" , UpperCamelCase__ )
        self.assertIn("attention_mask" , UpperCamelCase__ )
        self.assertNotIn("decoder_input_ids" , UpperCamelCase__ )
        self.assertNotIn("decoder_attention_mask" , UpperCamelCase__ )
    def _UpperCAmelCase ( self : Optional[Any] ):
        # text_target with max_length padding yields fixed-width label ids.
        A__ : Union[str, Any] =self.ta_base_tokenizer
        A__ : Union[str, Any] =[
            "Summary of the text.",
            "Another summary.",
        ]
        A__ : Dict =tokenizer(
            text_target=UpperCamelCase__ , max_length=32 , padding="max_length" , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
        self.assertEqual(32 , targets["input_ids"].shape[1] )
    def _UpperCAmelCase ( self : List[str] ):
        # Joint text + text_target call produces the expected input_ids and labels.
        A__ : Optional[int] =self.ta_base_tokenizer
        A__ : int =["A long paragraph for summarization. </s>"]
        A__ : str =["Summary of the text. </s>"]
        # fmt: off
        A__ : int =[68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        A__ : str =[86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        A__ : int =tokenizer(UpperCamelCase__ , text_target=UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , batch["input_ids"][0] )
        self.assertEqual(UpperCamelCase__ , batch["labels"][0] )
    def _UpperCAmelCase ( self : List[Any] ):
        # save_pretrained / from_pretrained round-trips, including added special tokens
        # and model_max_length overrides.
        # safety check on max_len default value so we are sure the test works
        A__ : List[str] =self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        A__ : List[Any] =self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                A__ : Tuple =tempfile.mkdtemp()
                A__ : List[str] =" He is very happy, UNwant\u00E9d,running"
                A__ : str =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
                tokenizer.save_pretrained(UpperCamelCase__ )
                A__ : Optional[Any] =tokenizer.__class__.from_pretrained(UpperCamelCase__ )
                A__ : str =after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
                self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
                shutil.rmtree(UpperCamelCase__ )
        A__ : int =self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                A__ : str =tempfile.mkdtemp()
                A__ : Optional[Any] =" He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"] )
                A__ : Dict =tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token" )
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
                A__ : Tuple =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
                tokenizer.save_pretrained(UpperCamelCase__ )
                A__ : List[str] =tokenizer.__class__.from_pretrained(UpperCamelCase__ )
                A__ : List[str] =after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
                self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
                self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                A__ : List[str] =tokenizer.__class__.from_pretrained(UpperCamelCase__ , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(UpperCamelCase__ )
    def _UpperCAmelCase ( self : Dict ):
        # additional_special_tokens can be edited on disk (json files) or passed at load time.
        A__ : List[str] =[]
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(UpperCamelCase__ )
                with open(os.path.join(UpperCamelCase__ , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
                    A__ : List[Any] =json.load(UpperCamelCase__ )
                with open(os.path.join(UpperCamelCase__ , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
                    A__ : Tuple =json.load(UpperCamelCase__ )
                # ByT5 reserves 125 extra_id sentinel tokens; append one custom special token.
                A__ : int =[F'''<extra_id_{i}>''' for i in range(125 )]
                A__ : int =added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                A__ : Optional[Any] =added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(UpperCamelCase__ , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(UpperCamelCase__ , UpperCamelCase__ )
                with open(os.path.join(UpperCamelCase__ , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(UpperCamelCase__ , UpperCamelCase__ )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                A__ : Tuple =tokenizer_class.from_pretrained(
                    UpperCamelCase__ , )
                self.assertIn(
                    "an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                A__ : Optional[int] =added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=UpperCamelCase__ )]
                A__ : Union[str, Any] =tokenizer_class.from_pretrained(
                    UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , )
                self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
    def _UpperCAmelCase ( self : List[Any] ):
        # Decoding an out-of-range single byte id (255) yields the empty string.
        A__ : str =[]
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(UpperCamelCase__ )
                A__ : Optional[int] =tokenizer_class.from_pretrained(UpperCamelCase__ )
                self.assertTrue(tokenizer.decode([255] ) == "" )
    def _UpperCAmelCase ( self : List[Any] ):
        # Intentionally disabled common test (not applicable to a byte-level tokenizer).
        pass
    def _UpperCAmelCase ( self : Optional[Any] ):
        # Intentionally disabled common test (not applicable to a byte-level tokenizer).
        pass
    def _UpperCAmelCase ( self : Optional[int] ):
        # Intentionally disabled common test (not applicable to a byte-level tokenizer).
        pass
    def _UpperCAmelCase ( self : Optional[int] ):
        # Intentionally disabled common test (not applicable to a byte-level tokenizer).
        pass
    def _UpperCAmelCase ( self : Any ):
        # convert_tokens_to_string on single-character tokens plus specials returns a str.
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        A__ : Union[str, Any] =self.get_tokenizers(fast=UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                A__ : Tuple =["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                A__ : Optional[Any] =tokenizer.convert_tokens_to_string(UpperCamelCase__ )
                self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
    def _UpperCAmelCase ( self : List[str] ):
        # Special-token id setters keep the token <-> id views consistent.
        A__ : Union[str, Any] =self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                A__ : List[str] =[
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]
                A__ : List[Any] =0
                A__ : List[Any] =tokenizer.convert_ids_to_tokens(
                    UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
                for attr in attributes_list:
                    setattr(UpperCamelCase__ , attr + "_id" , UpperCamelCase__ )
                    self.assertEqual(getattr(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
                    self.assertEqual(getattr(UpperCamelCase__ , attr + "_id" ) , UpperCamelCase__ )
                    setattr(UpperCamelCase__ , attr + "_id" , UpperCamelCase__ )
                    self.assertEqual(getattr(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
                    self.assertEqual(getattr(UpperCamelCase__ , attr + "_id" ) , UpperCamelCase__ )
                setattr(UpperCamelCase__ , "additional_special_tokens_ids" , [] )
                self.assertListEqual(getattr(UpperCamelCase__ , "additional_special_tokens" ) , [] )
                self.assertListEqual(getattr(UpperCamelCase__ , "additional_special_tokens_ids" ) , [] )
                setattr(UpperCamelCase__ , "additional_special_tokens_ids" , [token_id_to_test_setters] )
                self.assertListEqual(getattr(UpperCamelCase__ , "additional_special_tokens" ) , [token_to_test_setters] )
                self.assertListEqual(getattr(UpperCamelCase__ , "additional_special_tokens_ids" ) , [token_id_to_test_setters] )
| 595
| 0
|
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class _lowerCAmelCase :
    """Helper that builds small MPNet configs/inputs and runs per-head shape checks.

    NOTE(review): many assignments below bind to a throwaway `snake_case_` while
    later code reads the intended names (`result`, `config_and_inputs`, ...) — an
    extraction artifact; confirm against the upstream MPNet test file.
    """

    def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=False , _lowercase=True , _lowercase=9_9 , _lowercase=6_4 , _lowercase=5 , _lowercase=4 , _lowercase=6_4 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=3 , _lowercase=4 , _lowercase=None , ) -> List[str]:
        """Store the (deliberately tiny) model/test hyper-parameters."""
        snake_case_ : List[Any] = parent
        snake_case_ : int = batch_size
        snake_case_ : List[str] = seq_length
        snake_case_ : str = is_training
        snake_case_ : List[Any] = use_input_mask
        snake_case_ : int = use_token_type_ids
        snake_case_ : Union[str, Any] = use_labels
        snake_case_ : Union[str, Any] = vocab_size
        snake_case_ : Optional[Any] = hidden_size
        snake_case_ : List[str] = num_hidden_layers
        snake_case_ : Optional[Any] = num_attention_heads
        snake_case_ : int = intermediate_size
        snake_case_ : List[str] = hidden_act
        snake_case_ : str = hidden_dropout_prob
        snake_case_ : str = attention_probs_dropout_prob
        snake_case_ : Dict = max_position_embeddings
        snake_case_ : Union[str, Any] = type_vocab_size
        snake_case_ : Optional[int] = type_sequence_label_size
        snake_case_ : Any = initializer_range
        snake_case_ : str = num_labels
        snake_case_ : str = num_choices
        snake_case_ : Optional[int] = scope
    def UpperCAmelCase__ ( self ) -> str:
        """Return the full pretrained MPNet base config (used by the config tester)."""
        return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" )
    def UpperCAmelCase__ ( self ) -> Any:
        """Build random input ids/mask and (optionally) label tensors plus a config."""
        snake_case_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case_ : int = None
        if self.use_input_mask:
            snake_case_ : int = random_attention_mask([self.batch_size, self.seq_length] )
        snake_case_ : int = None
        snake_case_ : Dict = None
        snake_case_ : Dict = None
        if self.use_labels:
            snake_case_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            snake_case_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
        snake_case_ : List[str] = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def UpperCAmelCase__ ( self ) -> List[Any]:
        """Build a small MPNetConfig from the stored hyper-parameters."""
        return MPNetConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
        """Base model: check last_hidden_state and pooler_output shapes."""
        snake_case_ : Any = MPNetModel(config=__lowerCamelCase )
        model.to(__lowerCamelCase )
        model.eval()
        snake_case_ : Optional[Any] = model(__lowerCamelCase , __lowerCamelCase )
        snake_case_ : Optional[int] = model(__lowerCamelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[str]:
        """Question-answering head: check start/end logits shapes."""
        snake_case_ : Dict = MPNetForQuestionAnswering(config=__lowerCamelCase )
        model.to(__lowerCamelCase )
        model.eval()
        snake_case_ : Any = model(
            __lowerCamelCase , attention_mask=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Any:
        """Sequence-classification head: check logits shape."""
        snake_case_ : Optional[Any] = self.num_labels
        snake_case_ : Any = MPNetForSequenceClassification(__lowerCamelCase )
        model.to(__lowerCamelCase )
        model.eval()
        snake_case_ : List[str] = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
        """Multiple-choice head: expand inputs per choice, check logits shape."""
        snake_case_ : Tuple = self.num_choices
        snake_case_ : str = MPNetForMultipleChoice(config=__lowerCamelCase )
        model.to(__lowerCamelCase )
        model.eval()
        # Tile ids/mask to (batch, num_choices, seq) as the multiple-choice head expects.
        snake_case_ : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        snake_case_ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        snake_case_ : str = model(
            __lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
        """Token-classification head: check per-token logits shape."""
        snake_case_ : Any = self.num_labels
        snake_case_ : Dict = MPNetForTokenClassification(config=__lowerCamelCase )
        model.to(__lowerCamelCase )
        model.eval()
        snake_case_ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def UpperCAmelCase__ ( self ) -> Dict:
        """Split prepare_config_and_inputs into (config, inputs_dict) for the common tests."""
        snake_case_ : Tuple = self.prepare_config_and_inputs()
        ((snake_case_) , (snake_case_) , (snake_case_) , (snake_case_) , (snake_case_) , (snake_case_)) : List[str] = config_and_inputs
        snake_case_ : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class _lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
    """MPNet model test suite: wires the tester above into the common model/pipeline tests.

    NOTE(review): the repeated `_lowerCamelCase` class attributes were distinct names
    originally (all_model_classes / pipeline_model_mapping / flags) — the extraction
    collapsed them so later assignments shadow earlier ones; confirm upstream.
    """

    # Model classes exercised by the common tests (torch-only).
    _lowerCamelCase = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline task -> model class mapping for the pipeline mixin.
    _lowerCamelCase = (
        {
            '''feature-extraction''': MPNetModel,
            '''fill-mask''': MPNetForMaskedLM,
            '''question-answering''': MPNetForQuestionAnswering,
            '''text-classification''': MPNetForSequenceClassification,
            '''token-classification''': MPNetForTokenClassification,
            '''zero-shot''': MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _lowerCamelCase = False
    _lowerCamelCase = True
    def UpperCAmelCase__ ( self ) -> List[Any]:
        # setUp: build the model tester and the generic config tester.
        snake_case_ : int = MPNetModelTester(self )
        snake_case_ : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=3_7 )
    def UpperCAmelCase__ ( self ) -> Tuple:
        # Run the shared configuration sanity tests.
        self.config_tester.run_common_tests()
    def UpperCAmelCase__ ( self ) -> Dict:
        # Base model forward-pass shape test.
        snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*__lowerCamelCase )
    def UpperCAmelCase__ ( self ) -> Dict:
        # Sequence-classification head test.
        snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*__lowerCamelCase )
    def UpperCAmelCase__ ( self ) -> Union[str, Any]:
        # Multiple-choice head test.
        snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*__lowerCamelCase )
    def UpperCAmelCase__ ( self ) -> str:
        # Token-classification head test.
        snake_case_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*__lowerCamelCase )
    def UpperCAmelCase__ ( self ) -> Tuple:
        # Question-answering head test.
        snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*__lowerCamelCase )
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    """Slow integration test: run the pretrained base checkpoint on a fixed sentence."""

    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 58
|
# Package version; the obfuscated `a__` name broke `from accelerate import __version__`.
__version__ = "0.21.0"

from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)

# `rich` is an optional dependency; only re-export the helpers when it is installed.
if is_rich_available():
    from .utils import rich
| 190
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    """Fixture holding the configuration used by the DonutImageProcessor tests.

    Fixes the original's duplicate `__snake_case` parameter names (a SyntaxError),
    restores the class name its caller uses, and replaces the mutable list defaults
    with the None-sentinel idiom (behaviorally identical defaults).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct a DonutImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Property, construction, and __call__ tests for DonutImageProcessor.

    Restores distinct method names (the originals all shared one name and shadowed each
    other) and binds the locals that the obfuscated `__snake_case` references pointed at.
    """

    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        # Intentionally empty in the original test file.
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 641
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    """Builds a tiny XLM config plus random inputs and shape-checks every XLM head model.

    Restores the class name used by its caller (`XLMModelTester(self)`), distinct
    parameter names (the originals were all `__snake_case`, a SyntaxError), and the
    locals (`outputs`, `result_with_labels`, ...) the obfuscated bodies referenced
    without ever binding.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            # NOTE: choice_labels is only bound when use_labels is True, mirroring the source.
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        """Build a deliberately tiny XLMConfig for fast tests."""
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_xlm_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels  # size the token-classification head
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand every input to (batch, num_choices, seq_len) as the MC head expects.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Repackage prepare_config_and_inputs() as (config, inputs_dict) for the common tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common/generation/pipeline test-suite entry point for the XLM model family.

    Restores the undefined `snake_case__` mixin bases, distinct method names, and the
    `inputs_dict` local that `_prepare_for_class` returned without ever binding — the
    dummy QA start/end positions are now actually placed into the inputs dict.
    """

    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """Skip QA pipeline tests with slow tokenizers (known failures)."""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add dummy start/end position labels for the beam-search QA head."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        """Shape-check the per-step attention tuples produced during generation."""
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions],
                [expected_shape] * len(iter_attentions),
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        """Shape-check the per-step hidden-state tuples produced during generation."""
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    """Slow integration test: greedy generation from the pretrained xlm-mlm-en-2048 checkpoint."""

    @slow
    def test_lm_generate_xlm(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 641
| 1
|
"""Project Euler 30: sum of all numbers equal to the sum of the fifth powers of their digits."""

# Precomputed fifth power for each decimal digit, keyed by its character.
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of ``number``."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Return the sum of all numbers in [1000, 1000000) equal to their digit-fifth-power sum.

    The lower bound excludes trivial one-digit fixed points; 6 * 9**5 < 1_000_000
    bounds the search space from above.
    """
    return sum(
        number
        for number in range(1000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
| 111
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure for the ViT package: maps submodule name -> exported symbols.
# The original reassigned one variable per branch (clobbering the dict) and then
# passed an undefined `_import_structure` to _LazyModule; keys are restored here.
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Mirror the structure above so static type checkers see the real imports.
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so submodules import only on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 111
| 1
|
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Return the Gregorian Easter date for ``year`` using Gauss's computus.

    Fixes two defects: every intermediate was bound to one reused name (so the
    formulas read undefined variables), and ``leap_day_inhibits / 4`` used float
    division, which yields wrong dates for centuries not divisible by 4
    (e.g. 1994 came out as March 28 instead of April 3). Gauss's algorithm is
    defined over integers, so floor division is used throughout.
    """
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = year // 100
    lunar_orbit_correction = (13 + 8 * leap_day_inhibits) // 25
    leap_day_reinstall_number = leap_day_inhibits // 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    # Two classical exceptions pin Easter to April 19 / April 18.
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(days=days_to_add + days_from_phm_to_sunday)


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
| 298
|
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    # Convert a txt2img UnCLIP (Karlo) checkpoint into an UnCLIP image-variation
    # pipeline and save it to --dump_path.
    # BUG FIX: the original read `args.txtaimg_unclip` (argparse stores the option
    # under `txt2img_unclip`) and referenced the unbound names `txtaimg` /
    # `imgaimg` — every pipeline object had been assigned to the throwaway `__a`.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--txt2img_unclip',
        default='kakaobrain/karlo-v1-alpha',
        type=str,
        required=False,
        help='The pretrained txt2img unclip.',
    )
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    # Reuse every txt2img component except the prior; add the image encoder stack.
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )
    img2img.save_pretrained(args.dump_path)
| 298
| 1
|
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __A ( nn.Module ):
    """Flax 2D down-block with cross-attention: num_layers x (ResNet -> Transformer),
    optionally followed by a downsampler.  Returns the final hidden states and the
    per-layer skip outputs.

    NOTE(review): the original was obfuscated beyond compilation — duplicate
    `__snake_case` parameters (SyntaxError), locals bound to `__magic_name__` but
    read under other names, every dataclass field collapsed to `UpperCAmelCase__ = 42`,
    the nonexistent `jnp.floataa`, and the flax `setup` hook misnamed.  Restored;
    field names follow what the method bodies read (self.in_channels, ...).
    """
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # `setup` is the flax.linen hook that builds submodules.
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            # first layer maps in_channels -> out_channels; the rest keep out_channels
            res_in_channels = self.in_channels if i == 0 else self.out_channels
            resnets.append(
                FlaxResnetBlockaD(
                    in_channels=res_in_channels,
                    out_channels=self.out_channels,
                    dropout_prob=self.dropout,
                    dtype=self.dtype,
                )
            )
            attentions.append(
                FlaxTransformeraDModel(
                    in_channels=self.out_channels,
                    n_heads=self.num_attention_heads,
                    d_head=self.out_channels // self.num_attention_heads,
                    depth=1,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=self.only_cross_attention,
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class __A ( nn.Module ):
    """Flax 2D down-block without attention: num_layers ResNet blocks, optionally
    followed by a downsampler.  Returns the final hidden states and per-layer skips.

    NOTE(review): restored from an obfuscated, non-compiling original (duplicate
    parameter names, unbound locals, fields collapsed to `UpperCAmelCase__ = 42`).
    """
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_in_channels = self.in_channels if i == 0 else self.out_channels
            resnets.append(
                FlaxResnetBlockaD(
                    in_channels=res_in_channels,
                    out_channels=self.out_channels,
                    dropout_prob=self.dropout,
                    dtype=self.dtype,
                )
            )
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class __A ( nn.Module ):
    """Flax 2D up-block with cross-attention: each layer concatenates the matching
    skip connection, applies a ResNet block then a Transformer block; optionally
    upsamples at the end.

    NOTE(review): restored from an obfuscated, non-compiling original (duplicate
    parameter names, unbound locals, fields collapsed to `UpperCAmelCase__ = 42`).
    """
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            # skip channels mirror the down path; the last layer joins in_channels
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            resnets.append(
                FlaxResnetBlockaD(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=self.out_channels,
                    dropout_prob=self.dropout,
                    dtype=self.dtype,
                )
            )
            attentions.append(
                FlaxTransformeraDModel(
                    in_channels=self.out_channels,
                    n_heads=self.num_attention_heads,
                    d_head=self.out_channels // self.num_attention_heads,
                    depth=1,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=self.only_cross_attention,
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)
        return hidden_states
class __A ( nn.Module ):
    """Flax 2D up-block without attention: each layer concatenates the matching skip
    connection and applies a ResNet block; optionally upsamples at the end.

    NOTE(review): restored from an obfuscated, non-compiling original (duplicate
    parameter names, unbound locals, fields collapsed to `UpperCAmelCase__ = 42`).
    """
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            resnets.append(
                FlaxResnetBlockaD(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=self.out_channels,
                    dropout_prob=self.dropout,
                    dtype=self.dtype,
                )
            )
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)
        return hidden_states
class __A ( nn.Module ):
    """Flax 2D UNet mid-block with cross-attention: one leading ResNet block, then
    num_layers x (Transformer -> ResNet) pairs at constant channel width.

    NOTE(review): restored from an obfuscated, non-compiling original — `resnets`
    and `attentions` were appended to but never bound (all assignments went to
    `__magic_name__`), fields were collapsed to `UpperCAmelCase__ = 42`, and the
    `__call__` signature repeated one parameter name (a SyntaxError).
    """
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]
        attentions = []
        for _ in range(self.num_layers):
            attentions.append(
                FlaxTransformeraDModel(
                    in_channels=self.in_channels,
                    n_heads=self.num_attention_heads,
                    d_head=self.in_channels // self.num_attention_heads,
                    depth=1,
                    use_linear_projection=self.use_linear_projection,
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            )
            resnets.append(
                FlaxResnetBlockaD(
                    in_channels=self.in_channels,
                    out_channels=self.in_channels,
                    dropout_prob=self.dropout,
                    dtype=self.dtype,
                )
            )
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
| 96
|
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
snake_case = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenizer test-suite for ALBERT (slow + fast tokenizers), built on the shared
    TokenizerTesterMixin fixtures.

    NOTE(review): the original was obfuscated into non-working form — the mixin base
    was the undefined name `UpperCamelCase_`, all class flags were bound to `A_`,
    every method was named `_A` (each def shadowing the previous), and several calls
    referenced undefined names (`a__`, `text`, `vocab_keys`).  Restored the names
    the TokenizerTesterMixin contract dispatches to.
    """

    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing; `snake_case` is the
        # module-level fixture path (get_tests_dir("fixtures/spiece.model")).
        tokenizer = AlbertTokenizer(snake_case)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 3_0000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 3_0000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(snake_case, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # "é" is out of vocabulary with keep_accents, so it round-trips to <unk>
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(snake_case)

        text = tokenizer.encode("sequence builders")
        text_a = tokenizer.encode("multi-sequence build")
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
        # Expected encoding of three sequences, each padded to length 101.  The long
        # constant mask/padding runs are built from the token counts (31 and 12 real
        # tokens in sequences 2 and 3) instead of being written out literally.
        expected_encoding = {
            "attention_mask": [[1] * 101, [1] * 31 + [0] * 70, [1] * 12 + [0] * 89],
            "input_ids": [
                [2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3],  # noqa: E501
                [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3] + [0] * 70,  # noqa: E501
                [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3] + [0] * 89,
            ],
            "token_type_ids": [[0] * 101 for _ in range(3)],
        }
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
| 378
| 0
|
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def lowercase (snake_case__ : int ) -> int:
    """Test helper: return its argument plus 2 (handed to `evaluate` as the
    "add_two" tool in the tests below).

    BUG FIX: the body read the undefined name `x`; it now uses its parameter.
    The annotations claimed ``str -> Union[str, Any]``, which is wrong for ``+ 2``.
    """
    return snake_case__ + 2
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Unit tests for `transformers.tools.python_interpreter.evaluate`.

    NOTE(review): every local in the original was collapsed into the single name
    `lowerCAmelCase`, so `evaluate` received the wrong objects, and the helper
    function was referenced as the undefined `add_two` (this module defines it as
    `lowercase`).  Restored distinct `code` / `state` / `result` locals and pass
    the helper under the "add_two" tool key.
    """

    def test_evaluate_assign(self):
        code = """x = 3"""
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"""x""": 3})

        code = """x = y"""
        state = {"""y""": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"""x""": 5, """y""": 5})

    def test_evaluate_call(self):
        code = """y = add_two(x)"""
        state = {"""x""": 3}
        result = evaluate(code, {"""add_two""": lowercase}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"""x""": 3, """y""": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = """x = 3"""
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"""x""": 3})

    def test_evaluate_dict(self):
        code = """test_dict = {'x': x, 'y': add_two(x)}"""
        state = {"""x""": 3}
        result = evaluate(code, {"""add_two""": lowercase}, state=state)
        self.assertDictEqual(result, {"""x""": 3, """y""": 5})
        self.assertDictEqual(state, {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}})

    def test_evaluate_expression(self):
        code = """x = 3\ny = 5"""
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"""x""": 3, """y""": 5})

    def test_evaluate_f_string(self):
        code = """text = f'This is x: {x}.'"""
        state = {"""x""": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"""x""": 3, """text""": """This is x: 3."""})

    def test_evaluate_if(self):
        code = """if x <= 3:\n y = 2\nelse:\n y = 5"""
        state = {"""x""": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"""x""": 3, """y""": 2})

        state = {"""x""": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"""x""": 8, """y""": 5})

    def test_evaluate_list(self):
        code = """test_list = [x, add_two(x)]"""
        state = {"""x""": 3}
        result = evaluate(code, {"""add_two""": lowercase}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"""x""": 3, """test_list""": [3, 5]})

    def test_evaluate_name(self):
        code = """y = x"""
        state = {"""x""": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"""x""": 3, """y""": 3})

    def test_evaluate_subscript(self):
        code = """test_list = [x, add_two(x)]\ntest_list[1]"""
        state = {"""x""": 3}
        result = evaluate(code, {"""add_two""": lowercase}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"""x""": 3, """test_list""": [3, 5]})

        code = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
        state = {"""x""": 3}
        result = evaluate(code, {"""add_two""": lowercase}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}})

    def test_evaluate_for(self):
        code = """x = 0\nfor i in range(3):\n x = i"""
        state = {}
        result = evaluate(code, {"""range""": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"""x""": 2, """i""": 2})
| 529
|
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
a = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """Decorator factory for benchmark callables: run the wrapped function either
    eagerly or compiled as a `tf.function` (optionally with XLA).

    BUG FIX: the original signature repeated the parameter name `snake_case__`
    twice (a SyntaxError) and the inner functions read the unbound names `func`,
    `do_eager_mode` and `use_xla`.

    Raises:
        ValueError: if eager mode and XLA are requested together.
    """
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


# Backwards-compatible alias: the obfuscated source bound this factory to
# `lowercase` (immediately shadowed by the next definition below).
lowercase = run_with_tf_optimizations
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> "tf.Tensor":
    """Build a (batch_size, sequence_length) int32 tensor of random token ids in
    [0, vocab_size).

    BUG FIX: the original signature repeated the same parameter name three times
    (a SyntaxError) and used the nonexistent dtype `tf.intaa` (obfuscated
    `tf.int32`).
    """
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for _ in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)


# Backwards-compatible alias for the obfuscated original name.
lowercase = random_input_ids
class SCREAMING_SNAKE_CASE__ ( Benchmark ):
    """TensorFlow implementation of the benchmark harness: measures inference/training
    speed and memory for a model config under the configured `tf.distribute` strategy.

    NOTE(review): restored from a non-compiling obfuscated original — the base class
    was the undefined name `_a` (`Benchmark` is the only import that fits), every
    method was named `__lowercase` (each def shadowing the previous, while the base
    class dispatches to `_inference_speed` etc.), several signatures repeated the
    parameter name `lowerCAmelCase` (a SyntaxError), and locals such as `strategy`,
    `config`, `model` and `_inference` were read without ever being bound.
    """

    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        """Version string of the active TensorFlow install."""
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        """Build a zero-arg forward-pass callable for `model_name`."""
        config = self.config_dict[model_name]
        if self.args.fp16:  # was the obfuscated `fpaa`
            raise NotImplementedError("""Mixed precision is currently not supported.""")
        has_model_class_in_config = (
            hasattr(config, """architectures""")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = """TF""" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("""transformers""", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""")
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, """vocab_size""") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        """Build a zero-arg callable computing gradients of the LM loss."""
        config = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""")
        if self.args.fp16:
            raise NotImplementedError("""Mixed precision is currently not supported.""")
        has_model_class_in_config = (
            hasattr(config, """architectures""")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = """TF""" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("""transformers""", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""")
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, """vocab_size""") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        # NOTE(review): the obfuscated source hid the `training=` argument; upstream
        # passes training=False here — confirm against the project history.
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=False)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=False)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def _measure_speed(self, func) -> float:
        """Return the best per-call runtime (seconds) of `func` under the strategy scope."""
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func, repeat=self.args.repeat, number=10,
                )
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f'''Doesn\'t fit on GPU. {e}''')

    def _measure_memory(self, func: Callable[[], None]):
        """Return (memory, summary): peak memory of one `func()` call, plus an optional
        line-by-line trace summary."""
        logger.info(
            """Note that TensorFlow allocates more memory than """
            """it might need to speed up computation. """
            """The memory reported here corresponds to the memory """
            """reported by `nvidia-smi`, which can vary depending """
            """on total available memory on the GPU that is used.""")
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
                            """ consumption line by line.""")
                    trace = start_memory_tracing("""transformers""")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
                        """ with `args.memory=False`""")
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            """py3nvml not installed, we won't log GPU memory usage. """
                            """Install py3nvml (pip install py3nvml) to log information about GPU.""")
                        memory = """N/A"""
                    else:
                        logger.info(
                            """Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
                            """ running on the same GPU.""")
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            """When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
                            """ TensorFlow.""")
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f'''Doesn\'t fit on GPU. {e}''')
                return "N/A", None
| 529
| 1
|
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
class snake_case__ ( a_ ):
    """CLIP-based safety checker for generated images.

    A CLIP vision tower produces per-image embeddings; two scalar linear
    heads score NSFW content and watermarks. Any flagged image is replaced
    in-place with an all-zero (black) array of the same shape.
    """

    _SCREAMING_SNAKE_CASE : str = CLIPConfig
    _SCREAMING_SNAKE_CASE : Any = ["CLIPEncoderLayer"]

    def __init__( self , config : CLIPConfig ) -> None:
        """Build the vision tower and the two detection heads from *config*."""
        super().__init__(config )
        # Fix: the original bound these to throwaway locals, leaving the
        # attributes read by forward() undefined.
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config )
        self.p_head = nn.Linear(config.vision_config.projection_dim , 1 )
        self.w_head = nn.Linear(config.vision_config.projection_dim , 1 )

    @torch.no_grad()
    def forward( self , clip_input , images , p_threshold=0.5 , w_threshold=0.5 ):
        """Score *images* and black out any flagged as NSFW or watermarked.

        Args:
            clip_input: pre-processed pixel values fed to the CLIP tower.
            images: the decoded images (numpy arrays) to sanitize in place.
            p_threshold: NSFW-score threshold above which an image is flagged.
            w_threshold: watermark-score threshold above which an image is flagged.

        Returns:
            (images, nsfw_detected, watermark_detected) — the (possibly
            blacked-out) images plus two per-image boolean lists.
        """
        image_embeds = self.vision_model(clip_input )[0]

        nsfw_detected = self.p_head(image_embeds )
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected ):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed." )
        for idx, nsfw_detected_ in enumerate(nsfw_detected ):
            if nsfw_detected_:
                # Fix: original assigned the zero image to a local instead of
                # writing it back into the batch.
                images[idx] = np.zeros(images[idx].shape )

        watermark_detected = self.w_head(image_embeds )
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected ):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed." )
        for idx, watermark_detected_ in enumerate(watermark_detected ):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape )

        return images, nsfw_detected, watermark_detected

    # Backward-compatible alias for the previous (obfuscated) method name.
    UpperCAmelCase__ = forward
| 666
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
# Key-rename patterns, applied left-to-right by rename_state_dict_key to map
# TF Pegasus state-dict keys onto the Hugging Face naming scheme.
# (Renamed from the obfuscated `__UpperCamelCase`: the function below reads
# `PATTERNS`; the old annotation also referenced an unimported `Union`.)
PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]
# Backward-compatible alias for the old name.
__UpperCamelCase = PATTERNS
def rename_state_dict_key(k, patterns=None):
    """Map a TF Pegasus state-dict key *k* to its Hugging Face equivalent.

    Applies each (old, new) substring replacement in order. *patterns*
    defaults to the module-level PATTERNS table; passing an explicit list
    generalizes the helper for other key schemes.
    """
    if patterns is None:
        patterns = PATTERNS
    for pegasus_name, hf_name in patterns:
        # Fix: original replaced the key with itself via undefined names.
        k = k.replace(pegasus_name, hf_name)
    return k


a_ = rename_state_dict_key  # backward-compatible alias for the obfuscated name
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    """Build a PegasusForConditionalGeneration and load *tf_weights* into it.

    Args:
        tf_weights: mapping of TF variable name -> numpy array.
        cfg_updates: task-specific config overrides merged onto DEFAULTS.

    Raises:
        ValueError: if a converted key has no counterpart in the torch model.
    """
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        # Dense/projection kernels are stored transposed in TF.
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    # TF checkpoints have no bias terms; fill the torch biases with zeros.
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


a_ = convert_pegasus  # backward-compatible alias for the obfuscated name
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    """Load all trainable variables from a TF checkpoint into a name->array dict.

    Optimizer slots ("Adafactor") and "global_step" are skipped.
    """
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


a_ = get_tf_weights_as_numpy  # backward-compatible alias for the obfuscated name
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    """Convert a TF Pegasus checkpoint directory to a PyTorch model + tokenizer.

    The dataset name is inferred from the checkpoint's parent directory and
    selects the task-specific config overrides.
    """
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    # Positional embeddings are static (sinusoidal) and need not be saved.
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")


a_ = convert_pegasus_ckpt_to_pytorch  # backward-compatible alias for the obfuscated name
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        # Fix: the original computed these into throwaway names and never
        # assigned args.save_dir, so the default path was silently ignored.
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 328
| 0
|
from functools import reduce
# The 1000-digit number from Project Euler problem 8. Kept as a string so the
# solver can slice digit windows. Renamed to N: the solver's default argument
# references N, which the obfuscated name `__lowercase` left undefined.
N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)
# Backward-compatible alias for the old name.
__lowercase = N
def solution(n=None):
    """Return the greatest product of 13 adjacent digits in the digit string *n*.

    Defaults to the Project Euler problem-8 constant N (resolved lazily so the
    function can be imported even if N is absent). Fixes the original's
    duplicate lambda parameter names, which were a SyntaxError.
    """
    if n is None:
        n = N
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


_lowerCamelCase = solution  # backward-compatible alias; the caller below uses `solution`

if __name__ == "__main__":
    print(f'{solution() = }')
| 563
|
import datasets
from .evaluate import evaluate
# Restored constant names: the metric class decorator and datasets.MetricInfo
# below read _CITATION / _DESCRIPTION / _KWARGS_DESCRIPTION, but all three
# were bound to the same obfuscated name `__lowercase` (each shadowing the last).
_CITATION = """\
@article{hendrycks2021cuad,
    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
    journal={arXiv preprint arXiv:2103.06268},
    year={2021}
}
"""

_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""

_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': list of possible texts for the answer, as a list of strings
        depending on a threshold on the confidence probability of each prediction.
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the CUAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
        Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly match the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
    'aupr': Area Under the Precision-Recall curve
    'prec_at_80_recall': Precision at 80% recall
    'prec_at_90_recall': Precision at 90% recall
Examples:
    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> cuad_metric = datasets.load_metric(\"cuad\")
    >>> results = cuad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""

# Backward-compatible alias mirroring the original file's final binding.
__lowercase = _KWARGS_DESCRIPTION
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    """CUAD metric: wraps the official scoring script (see module docstring).

    Fixes two defects in the obfuscated original: both methods were named
    `UpperCamelCase` (the second shadowed the first, so datasets.Metric's
    required `_info`/`_compute` hooks were never defined), and `_compute`
    declared duplicate parameter names, which is a SyntaxError.
    """

    def _info(self):
        """Describe the metric's input schema for the datasets library."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        """Score *predictions* against *references* with the official CUAD script."""
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score


_lowercase = CUAD  # backward-compatible alias for the obfuscated class name
| 563
| 1
|
from __future__ import annotations
def bucket_sort(my_list):
    """Sort *my_list* with bucket sort: one bucket per integer offset from the minimum.

    Fixes the obfuscated original, which appended the whole input list to each
    bucket (instead of the element) and sorted the input (instead of the
    bucket), and which was named `snake_case__` while the asserts below call
    `bucket_sort`.
    """
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for value in my_list:
        buckets[int(value - min_value)].append(value)
    # Concatenate the (individually sorted) buckets in order.
    return [v for bucket in buckets for v in sorted(bucket)]


snake_case__ = bucket_sort  # backward-compatible alias for the obfuscated name

if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 613
|
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    """ConfigTester variant that also checks Levit-specific config attributes."""

    def create_and_test_config_common_properties(self):
        # Fix: the original checked hasattr on an undefined placeholder name
        # instead of the freshly-built config.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


_lowercase = LevitConfigTester  # backward-compatible alias for the obfuscated name
class LevitModelTester:
    """Builds small Levit configs and random inputs for the unit tests below.

    Restored from the obfuscated original: the class is referenced as
    `LevitModelTester` by the test class, method names are recovered from
    their call sites (`get_config`, `prepare_config_and_inputs_for_common`,
    ...), and the duplicate parameter names in the two `create_and_check_*`
    methods (a SyntaxError) are replaced by (config, pixel_values, labels).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Two Subsample stages between the three Levit blocks.
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random inputs."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # The patch embedding applies four stride-2 convolutions.
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


_lowercase = LevitModelTester  # backward-compatible alias for the obfuscated name
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for Levit (base model and classification heads).

    Restored from the obfuscated original: every method was named `_a`
    (so only the last survived and no test ever ran), and two signatures
    declared duplicate parameter names — a SyntaxError. Names are recovered
    from call sites (`super()._prepare_for_class`, the mixin test contract)
    and the upstream Levit test file.
    """

    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    # NOTE(review): the five boolean flag names below are reconstructed from
    # the upstream Levit test; the obfuscated placeholders did not preserve
    # them — confirm against transformers' tests/models/levit.
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            # Mirror the four stride-2 patch-embedding convolutions.
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [height * width, self.model_tester.hidden_sizes[0]],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                # The teacher variant is inference-only and takes no labels.
                del inputs_dict["labels"]
        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]
                    model = model_class(config)
                    model.to(torch_device)
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])
                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )
                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


_lowercase = LevitModelTest  # backward-compatible alias for the obfuscated name
def prepare_img():
    """Load the standard COCO test image used by the integration test below.

    Renamed from the obfuscated `snake_case__`: the integration test calls
    `prepare_img()`.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


snake_case__ = prepare_img  # backward-compatible alias for the obfuscated name
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    """Slow integration test: real checkpoint, real image, exact logits."""

    @cached_property
    def default_image_processor(self):
        # Called as `self.default_image_processor` below; the obfuscated
        # original named this `_a`, leaving that attribute undefined.
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


_lowercase = LevitModelIntegrationTest  # backward-compatible alias for the obfuscated name
| 613
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def snake_case (__lowercase ) -> List[Any]:
'''simple docstring'''
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def snake_case (__lowercase ) -> List[str]:
'''simple docstring'''
_snake_case : List[Any] = create_tensor(__lowercase )
_snake_case : Optional[int] = gather(__lowercase )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def snake_case (__lowercase ) -> Dict:
'''simple docstring'''
_snake_case : Dict = [state.process_index]
_snake_case : Optional[int] = gather_object(__lowercase )
assert len(__lowercase ) == state.num_processes, F"""{gathered_obj}, {len(__lowercase )} != {state.num_processes}"""
assert gathered_obj == list(range(state.num_processes ) ), F"""{gathered_obj} != {list(range(state.num_processes ) )}"""
def snake_case (__lowercase ) -> str:
'''simple docstring'''
_snake_case : Union[str, Any] = create_tensor(__lowercase )
_snake_case : Optional[int] = broadcast(__lowercase )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def snake_case (__lowercase ) -> Any:
'''simple docstring'''
if state.is_main_process:
_snake_case : List[Any] = torch.arange(state.num_processes + 1 ).to(state.device )
else:
_snake_case : Dict = torch.arange(state.num_processes ).to(state.device )
_snake_case : int = pad_across_processes(__lowercase )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def snake_case (__lowercase ) -> Any:
'''simple docstring'''
if state.num_processes != 2:
return
_snake_case : Optional[int] = create_tensor(__lowercase )
_snake_case : List[Any] = reduce(__lowercase , "sum" )
_snake_case : int = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(__lowercase , __lowercase ), F"""{reduced_tensor} != {truth_tensor}"""
def snake_case (__lowercase ) -> Any:
'''simple docstring'''
if state.num_processes != 2:
return
_snake_case : List[Any] = create_tensor(__lowercase )
_snake_case : Any = reduce(__lowercase , "mean" )
_snake_case : Any = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(__lowercase , __lowercase ), F"""{reduced_tensor} != {truth_tensor}"""
def snake_case (__lowercase ) -> Union[str, Any]:
'''simple docstring'''
main()
def snake_case () -> Tuple:
'''simple docstring'''
_snake_case : List[str] = PartialState()
state.print(F"""State: {state}""" )
state.print("testing gather" )
test_gather(__lowercase )
state.print("testing gather_object" )
test_gather_object(__lowercase )
state.print("testing broadcast" )
test_broadcast(__lowercase )
state.print("testing pad_across_processes" )
test_pad_across_processes(__lowercase )
state.print("testing reduce_sum" )
test_reduce_sum(__lowercase )
state.print("testing reduce_mean" )
test_reduce_mean(__lowercase )
if __name__ == "__main__":
main()
| 710
|
def or_gate(input_1: int, input_2: int) -> int:
    """Return 1 if at least one input is 1 (logical OR), else 0.

    Fixes the obfuscated original: it declared two parameters with the same
    name (a SyntaxError) and counted one input twice.
    """
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Exhaustively check the two-input OR truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 580
| 0
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
# Module-level logger for this configuration file.
_lowercase = logging.get_logger(__name__)

# Map from canonical GPT-Neo checkpoint name to its hosted config URL.
# NOTE(review): this rebinds `_lowercase`, clobbering the logger bound just
# above — any later code expecting a logger under this name gets a dict.
_lowercase = {
    '''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __A ( PretrainedConfig ):
    """Configuration class for GPT-Neo models.

    NOTE(review): the original block inherited from the undefined name ``A_``, gave
    every ``__init__`` keyword the same name (a SyntaxError), and assigned the config
    values to throwaway locals instead of ``self`` attributes. All three are restored
    here; the attribute names match the call sites visible in this block
    (``self.expand_attention_types_params``, ``self.attention_layers``, ``self.num_layers``).
    """

    # Read by the PretrainedConfig machinery.
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        # One attention kind ("global"/"local") per layer, expanded from the compact spec.
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        """Expand ``[[kinds, repeat], ...]`` into a flat per-layer list of attention kinds."""
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation (ONNX-exportable).

    NOTE(review): the original def was mangled to ``_A`` with four identical
    parameter names (a SyntaxError) and ``slice(None)`` turned into
    ``slice(UpperCamelCase)``; this restores the working form.
    """
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    # Start index of every window along `dimension`.
    low_indices = torch.arange(0, sizedim, step)
    # Number of complete windows of length `size`.
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    # Move the window axis to the end, matching torch.Tensor.unfold's layout.
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def custom_get_block_length_and_num_unfolds(input_size, window_size):
    """Return the largest divisor of `input_size` strictly below `window_size`,
    together with `input_size` divided by that divisor.

    NOTE(review): originally also named ``_A``, shadowing the previous helper.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(input_size, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(input_size, largest_divisor, rounding_mode="floor")
class __A ( OnnxConfigWithPast ):
    """ONNX export configuration for GPT-Neo.

    NOTE(review): the original block inherited from the undefined ``A_`` and named all
    four members ``_snake_case``, so only the last survived; the member names required
    by ``OnnxConfigWithPast`` (``inputs``, ``num_attention_heads``,
    ``generate_dummy_inputs``, ``default_onnx_opset``) are restored here.
    """

    @property
    def inputs(self):
        # Dynamic-axis spec for the exported graph's inputs.
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_attention_heads(self):
        return self._config.num_heads

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        common_inputs = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the inputs the way they appear in forward().
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # Extend the mask to cover the past sequence as well.
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self):
        return 13
| 157
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __A ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    """Fast tests for IFInpaintingSuperResolutionPipeline.

    NOTE(review): the original block repeated the undefined base name ``A_``, named
    every class attribute ``UpperCamelCase`` and every method ``_snake_case`` (so later
    definitions silently replaced earlier ones and pytest never collected the tests);
    the conventional names are restored here.
    """

    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        # Low-resolution input plus the full-resolution original and its mask.
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 157
| 1
|
"""simple docstring"""
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Read `file_path` as bytes and return its contents as a '0'/'1' bit string.

    Exits the process with an error message if the file cannot be opened.
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"  # each byte zero-padded to 8 bits
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Replace `curr_string` in the LZW lexicon with its two one-bit extensions.

    Mutates `lexicon` in place: `curr_string + "0"` keeps the old code and
    `curr_string + "1"` gets the new code `index`.
    """
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        # Code width grows by one bit at every power of two; left-pad existing codes.
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]
def compress_data(data_bits: str) -> str:
    """LZW-compress a bit string, returning the compressed bit string.

    Grows the lexicon via `add_key_to_lexicon` each time a known phrase is emitted.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    # Flush any trailing partial phrase by zero-extending it to a known code.
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length(source_path: str, compressed: str) -> str:
    """Prefix `compressed` with the source file's byte length.

    The length is encoded in binary, preceded by (bit_length - 1) zero bits so the
    decoder can recover where the length field ends.
    """
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the bit string `to_write` to `file_path` as bytes.

    Pads the final byte with a `1` marker followed by zeros (or appends a whole
    marker byte `10000000` when the string is byte-aligned) so the decoder can
    strip the padding. Exits the process if the file cannot be opened.
    NOTE(review): an empty `to_write` would raise IndexError on the `[-1]` access
    — callers always pass a non-empty string; confirm before reuse.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def compress(source_path: str, destination_path: str) -> None:
    """LZW-compress the file at `source_path` into `destination_path`."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
    # CLI usage: python <this file> <source_path> <destination_path>
    compress(sys.argv[1], sys.argv[2])
| 707
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    """Builds DetrImageProcessor kwargs and the output sizes the processor should produce.

    NOTE(review): the original block was named ``lowerCAmelCase__`` while its caller
    instantiates ``DetrImageProcessingTester``, and the ``__init__`` values were
    assigned to locals instead of ``self``; both are restored here.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor's shortest-edge resize should yield.

        For batched inputs, returns the per-batch maxima of height and width
        (the padded shape). Accepts PIL images or array-likes shaped (C, H, W).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Unit and (slow) integration tests for DetrImageProcessor.

    NOTE(review): the original block inherited from the undefined name ``lowercase``,
    named every method ``A_`` (so only the last survived and pytest collected nothing),
    and used invalid annotated tuple-unpack assignments; the conventional names and
    working unpacks are restored from the constants visible in the original.
    """

    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        # Overriding the size via kwargs; `max_size` maps onto `longest_edge`.
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 492
| 0
|
'''simple docstring'''
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pts1: np.ndarray, pts2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Apply the affine transform that maps `pts1` onto `pts2` to `img`.

    NOTE(review): the original signature repeated the parameter name ``A_`` five times
    (a SyntaxError) while the call sites below use ``get_rotation``; both are fixed.
    The output size is passed as ``(rows, cols)`` exactly as in the original —
    cv2's dsize is (width, height), so confirm the intended orientation.
    """
    matrix = cva.getAffineTransform(pts1, pts2)
    return cva.warpAffine(img, matrix, (rows, cols))
if __name__ == "__main__":
    # NOTE(review): the mangled `__snake_case` targets below clobber one another, and
    # the right-hand sides reference names (`image`, `gray_img`, `ptsa`, `images`,
    # `titles`, `img_rows`, `img_cols`) that are never bound — the original distinct
    # variable names were lost in mangling; restore them before running this demo.
    # read original image
    __snake_case: Dict = cva.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    __snake_case: Union[str, Any] = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    __snake_case: Any = gray_img.shape
    # set different points to rotate image
    __snake_case: List[str] = np.array([[50, 50], [2_00, 50], [50, 2_00]], np.floataa)
    __snake_case: str = np.array([[10, 1_00], [2_00, 50], [1_00, 2_50]], np.floataa)
    __snake_case: List[Any] = np.array([[50, 50], [1_50, 50], [1_20, 2_00]], np.floataa)
    __snake_case: List[Any] = np.array([[10, 1_00], [80, 50], [1_80, 2_50]], np.floataa)
    # add all rotated images in a list
    __snake_case: Dict = [
        gray_img,
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
    ]
    # plot different image rotations
    __snake_case: List[Any] = plt.figure(1)
    __snake_case: int = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 577
|
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    """Shared assertions: the JSON fixture yields 4 rows with columns col_1..col_3
    whose dtypes match `expected_features`."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    """keep_in_memory=True must grow process memory instead of memory-mapping Arrow data."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    """An explicit `features` schema casts the columns; None keeps the inferred dtypes."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    """The file's column order (col_3, col_1, col_2) must be preserved on read."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    """A requested feature order differing from the file's column order must win."""
    # jsonl_312_path file features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    """`split` is honored; None defaults to the "train" split."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    """The reader accepts a single path or a list of paths."""
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for DatasetDict results: every listed split has the fixture shape."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    """A {split: path} mapping yields those splits; None defaults to train+test."""
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    """Deserialize a single JSON document from a (binary) file-like object."""
    return json.load(buffer)


def load_json_lines(buffer):
    """Deserialize a JSON-Lines stream: one JSON document per line of `buffer`.

    Fixes the mangled original, which both collided with the previous function's
    name and decoded the buffer object itself (while iterating an undefined name)
    instead of decoding each line. The restored names match the call sites in the
    writer tests below (``load_json_lines`` / ``load_json``).
    """
    return [json.loads(line) for line in buffer]
class lowerCAmelCase :
    """Tests for JsonDatasetWriter: lines/orient variants, num_proc and compression.

    NOTE(review): method parameter names are mangled (every one is
    ``lowerCamelCase_``, a duplicate-argument SyntaxError), so the pytest
    fixtures these tests rely on cannot bind; docstrings below describe the
    intended behaviour of each test.
    """
    @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
    def lowerCamelCase__ ( self :Tuple , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[Any] ) -> int:
        """Round-trip a 10-row dataset through the writer and matching loader."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(lowerCamelCase_ , lowerCamelCase_ , lines=lowerCamelCase_ ).write()
            buffer.seek(0 )
            UpperCamelCase__ = load_json_function(lowerCamelCase_ )
        assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
        assert isinstance(exported_content[0] , lowerCamelCase_ )
        assert len(lowerCamelCase_ ) == 1_0
    @pytest.mark.parametrize(
        "orient, container, keys, len_at" , [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789" ), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ] , )
    def lowerCamelCase__ ( self :str , lowerCamelCase_ :int , lowerCamelCase_ :Dict , lowerCamelCase_ :Tuple , lowerCamelCase_ :str , lowerCamelCase_ :List[str] ) -> Tuple:
        """Check every pandas-style ``orient`` produces the expected container shape."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(lowerCamelCase_ , lowerCamelCase_ , lines=lowerCamelCase_ , orient=lowerCamelCase_ ).write()
            buffer.seek(0 )
            UpperCamelCase__ = load_json(lowerCamelCase_ )
        assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(lowerCamelCase_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
        if len_at:
            assert len(exported_content[len_at] ) == 1_0
        else:
            assert len(lowerCamelCase_ ) == 1_0
    @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
    def lowerCamelCase__ ( self :Any , lowerCamelCase_ :Any , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] ) -> Tuple:
        """Same round-trip as above but writing with two worker processes."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(lowerCamelCase_ , lowerCamelCase_ , lines=lowerCamelCase_ , num_proc=2 ).write()
            buffer.seek(0 )
            UpperCamelCase__ = load_json_function(lowerCamelCase_ )
        assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
        assert isinstance(exported_content[0] , lowerCamelCase_ )
        assert len(lowerCamelCase_ ) == 1_0
    @pytest.mark.parametrize(
        "orient, container, keys, len_at" , [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789" ), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ] , )
    def lowerCamelCase__ ( self :Any , lowerCamelCase_ :str , lowerCamelCase_ :List[str] , lowerCamelCase_ :Tuple , lowerCamelCase_ :str , lowerCamelCase_ :Dict ) -> Optional[Any]:
        """``orient`` matrix again, with multiprocessing enabled."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(lowerCamelCase_ , lowerCamelCase_ , lines=lowerCamelCase_ , orient=lowerCamelCase_ , num_proc=2 ).write()
            buffer.seek(0 )
            UpperCamelCase__ = load_json(lowerCamelCase_ )
        assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(lowerCamelCase_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
        if len_at:
            assert len(exported_content[len_at] ) == 1_0
        else:
            assert len(lowerCamelCase_ ) == 1_0
    def lowerCamelCase__ ( self :str , lowerCamelCase_ :Any ) -> Any:
        """num_proc=0 is invalid and must raise at writer construction."""
        with pytest.raises(lowerCamelCase_ ):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(lowerCamelCase_ , lowerCamelCase_ , num_proc=0 )
    @pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
    def lowerCamelCase__ ( self :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :str , lowerCamelCase_ :str , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] ) -> str:
        """Write with compression and compare decompressed bytes to a reference file."""
        UpperCamelCase__ = tmp_path_factory.mktemp("data" ) / f'test.json.{extension}'
        UpperCamelCase__ = str(shared_datadir / f'test_file.json.{extension}' )
        JsonDatasetWriter(lowerCamelCase_ , lowerCamelCase_ , compression=lowerCamelCase_ ).write()
        with fsspec.open(lowerCamelCase_ , "rb" , compression="infer" ) as f:
            UpperCamelCase__ = f.read()
        with fsspec.open(lowerCamelCase_ , "rb" , compression="infer" ) as f:
            UpperCamelCase__ = f.read()
        assert exported_content == original_content
| 516
| 0
|
'''simple docstring'''
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0B1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class __lowercase:
    """Embed the fixed Stability watermark into a batch of image tensors.

    Restored from a mangled version in which the module constants and the
    instance attributes were assigned to throwaway names, so ``self.encoder``
    never existed and the method returned its input unmodified.
    """

    def __init__(self) -> None:
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        # 'bits' mode embeds the raw 0/1 list prepared above.
        self.encoder.set_watermark('''bits''', self.watermark)

    def _UpperCAmelCase(self, images):
        """Watermark a NCHW float tensor and return it with the same layout.

        The arithmetic below maps values from [-1, 1] to [0, 255] and back, so
        inputs are assumed to be in [-1, 1] -- TODO confirm against callers.
        Images narrower than 256 px cannot carry the DWT-DCT watermark and are
        returned untouched.
        """
        if images.shape[-1] < 256:
            return images
        # To uint8-range HWC numpy arrays for the encoder.
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        images = [self.encoder.encode(image, '''dwtDct''') for image in images]
        # Back to a NCHW torch tensor in [-1, 1].
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
| 716
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __lowercase:
    """Undirected weighted graph with Boruvka's minimum-spanning-tree algorithm.

    Restored from a mangled version in which every assignment target had been
    rewritten to a throwaway name (so no attribute was ever set) and all five
    methods shared one name (so only the last survived). Method names follow
    the internal call sites (``find_component``/``set_component``/``union``).
    """

    def __init__(self, num_of_nodes) -> None:
        # Node count, edge list of [u, v, weight], and the union-find map.
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []
        self.m_component = {}

    def add_edge(self, u_node, v_node, weight) -> None:
        """Register an undirected edge between ``u_node`` and ``v_node``."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node):
        """Return the representative (root) of ``u_node``'s component."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node) -> None:
        """Path-compress: point every node directly at its component root."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size, u_node, v_node) -> None:
        """Merge the components of ``u_node`` and ``v_node`` (union by size)."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Compute and print the minimum spanning tree via Boruvka's algorithm."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            # Find the cheapest edge leaving each component.
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            # Add each selected edge and merge its endpoints' components.
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def _lowerCAmelCase():
    """Intentional no-op; kept so the module exposes a callable entry point."""
    return None
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 56
| 0
|
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return every k-element combination of {1, ..., n} in lexicographic order.

    Restores the three function names (all were mangled to ``_A``, so only the
    last definition survived) to match the call sites in this module.
    """
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    """Backtracking helper: ``level`` picks remain, next candidate is ``increment``."""
    if level == 0:
        # A full combination is complete; store a copy.
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    """Print each combination on its own line."""
    for combination in total_list:
        print(*combination)
if __name__ == "__main__":
    # Demo: print all 2-element combinations of {1, ..., 4}.
    # NOTE(review): the three assignment targets are mangled to one name, so
    # ``n``, ``k`` and ``total_list`` below are undefined -- restore before running.
    UpperCAmelCase : Any = 4
    UpperCAmelCase : Union[str, Any] = 2
    UpperCAmelCase : List[str] = generate_all_combinations(n, k)
    print_all_state(total_list)
| 563
|
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( UpperCamelCase__):
    """Numerical regression tests for EulerDiscreteScheduler.

    NOTE(review): names are mangled throughout -- the base class (the import
    above suggests SchedulerCommonTest), the two class attributes (which share
    one name, so only the second survives) and every local assignment target.
    The docstrings describe the intended behaviour of each test.
    """
    _lowercase : Dict = (EulerDiscreteScheduler,)
    _lowercase : List[str] = 10
    def _lowercase ( self , **lowerCAmelCase__ ) -> Optional[Any]:
        """Build the default scheduler config, overridable via kwargs."""
        a__ : Any ={
            "num_train_timesteps": 1_1_0_0,
            "beta_start": 0.00_01,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**lowerCAmelCase__ )
        return config
    def _lowercase ( self ) -> Dict:
        """Exercise several num_train_timesteps settings."""
        for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=lowerCAmelCase__ )
    def _lowercase ( self ) -> str:
        """Exercise several (beta_start, beta_end) pairs."""
        for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
            self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ )
    def _lowercase ( self ) -> Any:
        """Exercise both beta schedules."""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=lowerCAmelCase__ )
    def _lowercase ( self ) -> Any:
        """Exercise both prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=lowerCAmelCase__ )
    def _lowercase ( self ) -> List[Any]:
        """Full denoising loop (epsilon); checks sum/mean against reference values."""
        a__ : List[Any] =self.scheduler_classes[0]
        a__ : List[Any] =self.get_scheduler_config()
        a__ : Optional[Any] =scheduler_class(**lowerCAmelCase__ )
        scheduler.set_timesteps(self.num_inference_steps )
        a__ : Tuple =torch.manual_seed(0 )
        a__ : List[Any] =self.dummy_model()
        a__ : Any =self.dummy_sample_deter * scheduler.init_noise_sigma
        a__ : Optional[int] =sample.to(lowerCAmelCase__ )
        for i, t in enumerate(scheduler.timesteps ):
            a__ : Union[str, Any] =scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
            a__ : Any =model(lowerCAmelCase__ , lowerCAmelCase__ )
            a__ : Tuple =scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
            a__ : int =output.prev_sample
        a__ : Union[str, Any] =torch.sum(torch.abs(lowerCAmelCase__ ) )
        a__ : int =torch.mean(torch.abs(lowerCAmelCase__ ) )
        assert abs(result_sum.item() - 10.08_07 ) < 1E-2
        assert abs(result_mean.item() - 0.01_31 ) < 1E-3
    def _lowercase ( self ) -> List[Any]:
        """Full denoising loop with v_prediction; checks against reference values."""
        a__ : Union[str, Any] =self.scheduler_classes[0]
        a__ : Dict =self.get_scheduler_config(prediction_type="v_prediction" )
        a__ : str =scheduler_class(**lowerCAmelCase__ )
        scheduler.set_timesteps(self.num_inference_steps )
        a__ : Tuple =torch.manual_seed(0 )
        a__ : Dict =self.dummy_model()
        a__ : List[str] =self.dummy_sample_deter * scheduler.init_noise_sigma
        a__ : Tuple =sample.to(lowerCAmelCase__ )
        for i, t in enumerate(scheduler.timesteps ):
            a__ : List[str] =scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
            a__ : Any =model(lowerCAmelCase__ , lowerCAmelCase__ )
            a__ : List[str] =scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
            a__ : Optional[Any] =output.prev_sample
        a__ : Optional[Any] =torch.sum(torch.abs(lowerCAmelCase__ ) )
        a__ : List[Any] =torch.mean(torch.abs(lowerCAmelCase__ ) )
        assert abs(result_sum.item() - 0.00_02 ) < 1E-2
        assert abs(result_mean.item() - 2.2676E-06 ) < 1E-3
    def _lowercase ( self ) -> Tuple:
        """Full denoising loop with timesteps placed on a device."""
        a__ : List[Any] =self.scheduler_classes[0]
        a__ : Dict =self.get_scheduler_config()
        a__ : str =scheduler_class(**lowerCAmelCase__ )
        scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ )
        a__ : Tuple =torch.manual_seed(0 )
        a__ : Any =self.dummy_model()
        a__ : Optional[Any] =self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        a__ : Optional[Any] =sample.to(lowerCAmelCase__ )
        for t in scheduler.timesteps:
            a__ : Dict =scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
            a__ : Tuple =model(lowerCAmelCase__ , lowerCAmelCase__ )
            a__ : str =scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
            a__ : Union[str, Any] =output.prev_sample
        a__ : List[str] =torch.sum(torch.abs(lowerCAmelCase__ ) )
        a__ : Union[str, Any] =torch.mean(torch.abs(lowerCAmelCase__ ) )
        assert abs(result_sum.item() - 10.08_07 ) < 1E-2
        assert abs(result_mean.item() - 0.01_31 ) < 1E-3
    def _lowercase ( self ) -> Dict:
        """Full denoising loop with Karras sigmas enabled."""
        a__ : Any =self.scheduler_classes[0]
        a__ : List[Any] =self.get_scheduler_config()
        a__ : List[Any] =scheduler_class(**lowerCAmelCase__ , use_karras_sigmas=lowerCAmelCase__ )
        scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ )
        a__ : int =torch.manual_seed(0 )
        a__ : Dict =self.dummy_model()
        a__ : int =self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        a__ : str =sample.to(lowerCAmelCase__ )
        for t in scheduler.timesteps:
            a__ : Tuple =scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
            a__ : Tuple =model(lowerCAmelCase__ , lowerCAmelCase__ )
            a__ : Dict =scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
            a__ : Tuple =output.prev_sample
        a__ : int =torch.sum(torch.abs(lowerCAmelCase__ ) )
        a__ : Any =torch.mean(torch.abs(lowerCAmelCase__ ) )
        assert abs(result_sum.item() - 1_24.52_29_94_99_51_17_19 ) < 1E-2
        assert abs(result_mean.item() - 0.1_62_13_93_26_33_39_99_63 ) < 1E-3
| 563
| 1
|
"""simple docstring"""
import sys
# Project Euler 8: the 1000-digit number, restored under the name ``N`` that
# the solver's default argument references.
N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def str_eval(s: str) -> int:
    """Return the product of the digits in ``s`` (1 for the empty string)."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Return the largest product of 13 adjacent digits in ``n``.

    The mangled original named both functions identically (so ``str_eval`` was
    undefined at the call site) and referenced an undefined constant ``N``;
    this restoration simply scans every 13-digit window.
    """
    return max(str_eval(n[i : i + 13]) for i in range(len(n) - 12))
if __name__ == "__main__":
    # Print the Project Euler 8 result for the module's digit string.
    print(F'''{solution() = }''')
| 710
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class lowerCAmelCase ( snake_case ):
    """Output container for the IF pipeline.

    NOTE(review): the three fields below were mangled to one repeated name with
    a placeholder value, so only a single attribute survives; upstream these
    are distinct annotated fields -- restore before use.
    """
    lowerCAmelCase__ = 42
    lowerCAmelCase__ = 42
    lowerCAmelCase__ = 42
# Fall back to dummy objects when torch/transformers are unavailable so that
# importing this package never hard-crashes.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    # Real pipelines are only exported when both dependencies are present.
    from .pipeline_if import IFPipeline
    from .pipeline_if_imgaimg import IFImgaImgPipeline
    from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
| 494
| 0
|
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Regression test: an accelerator-prepared SGD optimizer must be picklable."""
    def snake_case_ ( self):
        # NOTE(review): locals are mangled -- every assignment targets the same
        # name and ``lowerCAmelCase_`` below is undefined; upstream the prepared
        # optimizer is what gets round-tripped through pickle.
        __SCREAMING_SNAKE_CASE = torch.nn.Linear(1_0 , 1_0)
        __SCREAMING_SNAKE_CASE = torch.optim.SGD(model.parameters() , 0.1)
        __SCREAMING_SNAKE_CASE = Accelerator()
        __SCREAMING_SNAKE_CASE = accelerator.prepare(lowerCAmelCase_)
        try:
            pickle.loads(pickle.dumps(lowerCAmelCase_))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        # Reset global accelerator state so later tests start clean.
        AcceleratorState._reset_state()
| 155
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve():
    """Yield the primes 2, 3, 5, ... indefinitely (incremental sieve).

    ``factor_map`` holds one known prime factor for each upcoming composite;
    when the running counter hits a mapped composite the factor slides forward,
    otherwise the counter is prime. Restored from a mangled version whose
    assignment targets (and the dict write) had been destroyed.
    """
    factor_map = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: move its stored factor to the next multiple
            # not yet claimed by another factor.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` really is prime: mark its square and emit it.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit=1E10):
    """Return the first odd n (paired with every other prime) where 2*p*n > limit.

    The two module functions were both mangled to one name; the bodies call
    ``sieve()`` and the main guard calls ``solution()``, fixing the intent.
    """
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
    # Print the answer for the default limit (1e10).
    print(solution())
| 621
| 0
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
_lowerCAmelCase = logging.get_logger(__name__)
class _UpperCAmelCase ( _lowerCamelCase ):
    """Audio feature extractor: log-mel spectrogram patches plus an attention mask.

    NOTE(review): names are mangled throughout -- the base class, the class
    attribute (upstream ``model_input_names``), and every ``__init__``/``__call__``
    parameter is ``a__`` (a duplicate-argument SyntaxError). The bodies still
    reference the upstream parameter names, which documents the intended
    signatures.
    """
    a = ['''audio_values''', '''audio_mask''']
    def __init__( self , a__=2048 , a__=1 , a__=[16, 16] , a__=128 , a__=44100 , a__=86 , a__=2048 , a__=0.0 , **a__ , ):
        # Upstream parameters: spectrogram_length, num_channels, patch_size,
        # feature_size, sampling_rate, hop_length_to_sampling_rate, n_fft,
        # padding_value.
        super().__init__(
            feature_size=a__ , sampling_rate=a__ , padding_value=a__ , **a__ , )
        A_ : Dict = spectrogram_length
        A_ : Tuple = num_channels
        A_ : Any = patch_size
        A_ : str = feature_size // self.patch_size[1]
        A_ : List[Any] = n_fft
        A_ : Optional[Any] = sampling_rate // hop_length_to_sampling_rate
        A_ : List[Any] = sampling_rate
        A_ : Optional[Any] = padding_value
        # Slaney-style mel filter bank, transposed for spectrogram().
        A_ : Any = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a__ , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=a__ , norm="""slaney""" , mel_scale="""slaney""" , ).T
    def _lowerCamelCase ( self , a__ ):
        """Compute a dB-scaled log-mel spectrogram for one waveform, rescaled to [-1, 1]."""
        A_ : List[Any] = spectrogram(
            a__ , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
        A_ : Dict = log_spec[:, :-1]
        A_ : str = log_spec - 20.0
        A_ : List[Any] = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
    def __call__( self , a__ , a__ = None , a__ = True , a__ = None , a__ = False , a__ = False , **a__ , ):
        """Convert raw mono speech into padded log-mel patches and an optional mask."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    """This feature extractor is set to support sampling rate"""
                    F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
                    F""" with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        A_ : str = isinstance(a__ , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
        A_ : List[str] = is_batched_numpy or (
            isinstance(a__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            A_ : Any = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(a__ , np.ndarray ):
            A_ : Dict = np.asarray(a__ , dtype=np.floataa )
        elif isinstance(a__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            A_ : int = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            A_ : Optional[int] = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        A_ : List[str] = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , a__ ):
            A_ : Dict = [np.asarray(a__ , dtype=np.floataa ) for feature in audio_features]
        # Create audio attention mask
        A_ : Dict = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
        if return_attention_mask:
            A_ : List[str] = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            A_ : List[Any] = np.array(a__ ).astype(np.floataa )
        # convert into correct format for padding
        A_ : int = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
        A_ : Optional[int] = np.ones([len(a__ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
        A_ : str = padded_audio_features * self.padding_value
        for i in range(len(a__ ) ):
            A_ : Union[str, Any] = audio_features[i]
            A_ : str = feature
        # return as BatchFeature
        if return_attention_mask:
            A_ : Any = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
        else:
            A_ : Optional[Any] = {"""audio_values""": padded_audio_features}
        A_ : List[Any] = BatchFeature(data=a__ , tensor_type=a__ )
        return encoded_inputs
| 711
|
from typing import List
from .keymap import KEYMAP, get_character
def _lowerCAmelCase ( _lowerCAmelCase ):
    """Decorator factory: tag a handler function with a single key it handles.

    NOTE(review): names are mangled -- the parameter shadows the function, and
    ``handle``/``key``/``func`` are undefined inside ``decorator``; upstream
    this appends ``key`` to the function's ``handle_key`` attribute list.
    """
    def decorator(_lowerCAmelCase ):
        A_ : List[Any] = getattr(_lowerCAmelCase ,"""handle_key""" ,[] )
        handle += [key]
        setattr(_lowerCAmelCase ,"""handle_key""" ,_lowerCAmelCase )
        return func
    return decorator
def _lowerCAmelCase ( *_lowerCAmelCase ):
    """Decorator factory: tag a handler with several keys at once (same caveats).

    NOTE(review): collides with the function above (mangled names); upstream
    the two have distinct names and this variant extends ``handle_key`` with
    all of ``keys``.
    """
    def decorator(_lowerCAmelCase ):
        A_ : Tuple = getattr(_lowerCAmelCase ,"""handle_key""" ,[] )
        handle += keys
        setattr(_lowerCAmelCase ,"""handle_key""" ,_lowerCAmelCase )
        return func
    return decorator
class _UpperCAmelCase ( _lowerCamelCase ):
    """Metaclass that collects ``handle_key``-tagged methods into a ``key_handler``
    map and installs ``handle_input`` to dispatch a pressed key to its handler.
    """
    def __new__( cls , a__ , a__ , a__ ):
        # NOTE(review): duplicate parameter names (mangled) -- upstream this is
        # ``(cls, name, bases, attrs)``; as written it is a SyntaxError.
        A_ : Any = super().__new__(cls , a__ , a__ , a__ )
        if not hasattr(a__ , """key_handler""" ):
            setattr(a__ , """key_handler""" , {} )
        setattr(a__ , """handle_input""" , KeyHandler.handle_input )
        for value in attrs.values():
            A_ : Tuple = getattr(a__ , """handle_key""" , [] )
            for key in handled_keys:
                A_ : Optional[Any] = value
        return new_cls
    @staticmethod
    def _lowerCamelCase ( cls ):
        """Read one keypress and route it to the registered handler, if any."""
        A_ : List[str] = get_character()
        if char != KEYMAP["undefined"]:
            A_ : str = ord(a__ )
        A_ : List[str] = cls.key_handler.get(a__ )
        if handler:
            A_ : Optional[int] = char
            return handler(cls )
        else:
            return None
def _lowerCAmelCase ( cls ):
    """Rebuild ``cls`` through the KeyHandler metaclass so key dispatch works."""
    return KeyHandler(cls.__name__ ,cls.__bases__ ,cls.__dict__.copy() )
| 481
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :Optional[Any] =logging.get_logger(__name__)
class lowerCAmelCase__(PretrainedConfig):
    """Configuration for a composite encoder-decoder model.

    Restored from a mangled version in which every assignment target was the
    same throwaway name (so the encoder/decoder configs were never stored),
    both class attributes collided, and the classmethod declared duplicate
    parameter names (a SyntaxError). ``to_dict`` below relies on
    ``model_type`` being the class attribute name.
    """

    model_type = 'encoder-decoder'
    is_composition = True

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('encoder')
        encoder_model_type = encoder_config.pop('model_type')
        decoder_config = kwargs.pop('decoder')
        decoder_model_type = decoder_config.pop('model_type')
        # Imported lazily to avoid a circular import with the auto mapping.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """Build a composite config from two sub-configs, marking the decoder."""
        logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, recursing into the encoder/decoder sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 106
|
# Error message and check-letter lookup table for Spanish DNI validation.
# Restored from a mangled version in which both constants collided on one name
# and the validator referenced several undefined locals.
ERROR_MSG = 'Input must be a string of 8 numbers plus letter'
LOOKUP_LETTERS = 'TRWAGMYFPDXBNJZSQVHLCKE'


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish national ID (DNI): 8 digits plus its check letter.

    Returns:
        True iff the trailing letter matches ``LOOKUP_LETTERS[number % 23]``.
    Raises:
        TypeError: if ``spanish_id`` is not a string.
        ValueError: if it is not exactly 8 digits followed by one letter.
    """
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)
    # Dashes are commonly written between number and letter; ignore them.
    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex
    if letter.isdigit():
        raise ValueError(ERROR_MSG)
    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 519
| 0
|
"""simple docstring"""
import tensorflow as tf
from ...tf_utils import shape_list
class A_ ( tf.keras.layers.Layer ):
    def __init__( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : Optional[int]=False , **__lowerCamelCase : Dict ) -> Dict:
        # NOTE(review): parameter names are mangled (duplicates -> SyntaxError);
        # the body suggests the upstream signature (self, vocab_size, d_embed,
        # d_proj, cutoffs, div_val=1, keep_order=False, **kwargs). Assignment
        # targets were also destroyed -- these should set the attributes read
        # elsewhere in the class (self.cutoffs, self.div_val, ...).
        super().__init__(**__lowerCamelCase )
        __magic_name__ = vocab_size
        __magic_name__ = d_embed
        __magic_name__ = d_proj
        __magic_name__ = cutoffs + [vocab_size]
        __magic_name__ = [0] + self.cutoffs
        __magic_name__ = div_val
        __magic_name__ = self.cutoffs[0]
        __magic_name__ = len(self.cutoffs ) - 1
        __magic_name__ = self.shortlist_size + self.n_clusters
        __magic_name__ = keep_order
        __magic_name__ = []
        __magic_name__ = []
    def _snake_case ( self : Optional[int] , __lowerCamelCase : str ) -> Optional[int]:
        """Keras ``build``: lazily create cluster, projection and output weights.

        NOTE(review): assignment targets are mangled -- each ``add_weight``
        result should be bound (e.g. ``self.cluster_weight``, the per-tier
        projection and the (weight, bias) pairs appended below).
        """
        if self.n_clusters > 0:
            __magic_name__ = self.add_weight(
                shape=(self.n_clusters, self.d_embed) , initializer="zeros" , trainable=__lowerCamelCase , name="cluster_weight" )
            __magic_name__ = self.add_weight(
                shape=(self.n_clusters,) , initializer="zeros" , trainable=__lowerCamelCase , name="cluster_bias" )
        if self.div_val == 1:
            # Single embedding size: one output layer per cutoff tier.
            for i in range(len(self.cutoffs ) ):
                if self.d_proj != self.d_embed:
                    __magic_name__ = self.add_weight(
                        shape=(self.d_embed, self.d_proj) , initializer="zeros" , trainable=__lowerCamelCase , name=f'''out_projs_._{i}''' , )
                    self.out_projs.append(__lowerCamelCase )
                else:
                    self.out_projs.append(__lowerCamelCase )
                __magic_name__ = self.add_weight(
                    shape=(self.vocab_size, self.d_embed) , initializer="zeros" , trainable=__lowerCamelCase , name=f'''out_layers_._{i}_._weight''' , )
                __magic_name__ = self.add_weight(
                    shape=(self.vocab_size,) , initializer="zeros" , trainable=__lowerCamelCase , name=f'''out_layers_._{i}_._bias''' , )
                self.out_layers.append((weight, bias) )
        else:
            # Adaptive embedding sizes: dimension shrinks by div_val per tier.
            for i in range(len(self.cutoffs ) ):
                __magic_name__ , __magic_name__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                __magic_name__ = self.d_embed // (self.div_val**i)
                __magic_name__ = self.add_weight(
                    shape=(d_emb_i, self.d_proj) , initializer="zeros" , trainable=__lowerCamelCase , name=f'''out_projs_._{i}''' )
                self.out_projs.append(__lowerCamelCase )
                __magic_name__ = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i) , initializer="zeros" , trainable=__lowerCamelCase , name=f'''out_layers_._{i}_._weight''' , )
                __magic_name__ = self.add_weight(
                    shape=(r_idx - l_idx,) , initializer="zeros" , trainable=__lowerCamelCase , name=f'''out_layers_._{i}_._bias''' , )
                self.out_layers.append((weight, bias) )
        super().build(__lowerCamelCase )
    @staticmethod
    def _snake_case ( __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]=None ) -> List[str]:
        # NOTE(review): mangled duplicate parameter names (SyntaxError);
        # the body suggests the upstream signature (x, W, b, proj=None):
        # optionally project x, then return logits x @ W^T + b via einsum.
        __magic_name__ = x
        if proj is not None:
            __magic_name__ = tf.einsum("ibd,ed->ibe" , __lowerCamelCase , __lowerCamelCase )
        return tf.einsum("ibd,nd->ibn" , __lowerCamelCase , __lowerCamelCase ) + b
    @staticmethod
    def _snake_case ( __lowerCamelCase : List[str] , __lowerCamelCase : str ) -> Dict:
        # NOTE(review): mangled names; the body suggests the upstream signature
        # (logprob, target): gather, for each position, the log-probability of
        # its target id via a stacked (row, target) index into gather_nd.
        __magic_name__ = shape_list(__lowerCamelCase )
        __magic_name__ = tf.range(lp_size[0] , dtype=target.dtype )
        __magic_name__ = tf.stack([r, target] , 1 )
        return tf.gather_nd(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : int=True , __lowerCamelCase : Optional[int]=False ) -> List[Any]:
__magic_name__ = 0
if self.n_clusters == 0:
__magic_name__ = self._logit(__lowerCamelCase , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
__magic_name__ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__lowerCamelCase , logits=__lowerCamelCase )
__magic_name__ = tf.nn.log_softmax(__lowerCamelCase , axis=-1 )
else:
__magic_name__ = shape_list(__lowerCamelCase )
__magic_name__ = []
__magic_name__ = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
__magic_name__ , __magic_name__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
__magic_name__ = (target >= l_idx) & (target < r_idx)
__magic_name__ = tf.where(__lowerCamelCase )
__magic_name__ = tf.boolean_mask(__lowerCamelCase , __lowerCamelCase ) - l_idx
if self.div_val == 1:
__magic_name__ = self.out_layers[0][0][l_idx:r_idx]
__magic_name__ = self.out_layers[0][1][l_idx:r_idx]
else:
__magic_name__ = self.out_layers[i][0]
__magic_name__ = self.out_layers[i][1]
if i == 0:
__magic_name__ = tf.concat([cur_W, self.cluster_weight] , 0 )
__magic_name__ = tf.concat([cur_b, self.cluster_bias] , 0 )
__magic_name__ = self._logit(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , self.out_projs[0] )
__magic_name__ = tf.nn.log_softmax(__lowerCamelCase )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
__magic_name__ = tf.boolean_mask(__lowerCamelCase , __lowerCamelCase )
__magic_name__ = self._gather_logprob(__lowerCamelCase , __lowerCamelCase )
else:
__magic_name__ = self._logit(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , self.out_projs[i] )
__magic_name__ = tf.nn.log_softmax(__lowerCamelCase )
__magic_name__ = self.cutoffs[0] + i - 1 # No probability for the head cluster
__magic_name__ = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(__lowerCamelCase )
if target is not None:
__magic_name__ = tf.boolean_mask(__lowerCamelCase , __lowerCamelCase )
__magic_name__ = tf.boolean_mask(__lowerCamelCase , __lowerCamelCase )
__magic_name__ = self._gather_logprob(__lowerCamelCase , __lowerCamelCase )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(__lowerCamelCase , -cur_logprob , shape_list(__lowerCamelCase ) )
__magic_name__ = tf.concat(__lowerCamelCase , axis=-1 )
if target is not None:
if return_mean:
__magic_name__ = tf.reduce_mean(__lowerCamelCase )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(__lowerCamelCase )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(__lowerCamelCase , name=self.name , aggregation="mean" if return_mean else "" )
return out
| 468
|
"""simple docstring"""
def _lowerCAmelCase ( __lowerCamelCase:int ):
'''simple docstring'''
__magic_name__ = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 468
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure for the CTRL model (standard transformers pattern).
# The obfuscated version rebound one variable (`__A`) for the dict AND the
# backend lists (destroying the structure), and never installed the lazy
# module into `sys.modules`; both are restored here.
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

# PyTorch models are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

# TensorFlow models are only registered when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 134
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make all torch/cuda ops deterministic so the pipeline tests below are reproducible.
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the Kandinsky 2.2 text-to-image decoder pipeline.

    NOTE(review): the obfuscated original used an undefined base class (`a__`),
    gave every property/method the same name (so later defs shadowed earlier
    ones) and referenced undefined locals (`A`); names are restored from the
    attribute references inside the bodies (`self.time_input_dim`,
    `self.dummy_movq_kwargs`, ...).
    """

    pipeline_class = KandinskyVaaPipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        """Build the minimal unet / scheduler / movq set the pipeline needs."""
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline kwargs for a given device/seed."""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # mps does not support device-bound generators.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test for the Kandinsky 2.2 text-to-image pipeline.

    NOTE(review): renamed from the obfuscated `__snake_case`, which collided
    with the fast-test class of the same name above; method names and
    undefined locals (`A`, `torch.floataa`) restored as well.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        # Same seed for prior and decoder keeps the output reproducible.
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 320
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase :Union[str, Any] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Build the list of (old_key, new_key) pairs mapping DINO checkpoint names
    to HuggingFace ViT names.

    When `base_model` is True the targets are for a bare `ViTModel` (no
    classification head; the leading "vit." is stripped), otherwise for
    `ViTForImageClassification`.  Name restored from the call site in
    `convert_vit_checkpoint`; the obfuscated def repeated the parameter `a_`
    (a SyntaxError).
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused timm qkv projection into separate q/k/v entries.

    Mutates `state_dict` in place: pops `blocks.{i}.attn.qkv.{weight,bias}` and
    writes the HuggingFace `query`/`key`/`value` keys.  The target key names
    were lost in the obfuscated version (assignments were rebinds of one
    variable); restored to the standard ViT layout used by the rename keys
    above.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the classification-head weights from `state_dict` in place.

    Uses `pop(k, None)` so missing keys are ignored.  The obfuscated original
    called `state_dict.pop(a_, a_)` with a duplicated parameter name.
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move the value stored under `old` to `new` in `dct`, in place."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO cats image used to sanity-check conversions.

    Returns a PIL.Image (the obfuscated `-> str` annotation was wrong).  Name
    restored from the `prepare_img()` call in `convert_vit_checkpoint`.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """Copy/paste/tweak a DINO checkpoint's weights into the HuggingFace ViT
    structure, verify its outputs against the original model, and save it.

    Args:
        model_name: torch.hub name of the DINO model (e.g. "dino_vitb16").
        pytorch_dump_folder_path: output directory for the converted model.
        base_model: if True, convert to a bare `ViTModel` without the head.
    """
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        with open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r") as f:
            id2label = json.load(f)
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture (the "small" variants)
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCAmelCase :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
_lowerCAmelCase :Dict = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 179
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    """Builds tiny LED configs and inputs for the fast model tests below.

    NOTE(review): the obfuscated `__init__` repeated the parameter name
    `lowercase__` (a SyntaxError); parameter and method names are restored
    from the assignment order and from the `self.model_tester.*` call sites in
    the test class.
    """

    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        # Mark the final (eos) position as globally attended.
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    """Build a full LED input dict, deriving any masks that were not supplied.

    Name and parameter names restored from the call sites
    `prepare_led_inputs_dict(config, input_ids, decoder_input_ids)`; the
    obfuscated def repeated the parameter `a_` (a SyntaxError).
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # Always attend to the decoder start token, then mask padding.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast tests for the TF LED models.

    NOTE(review): the obfuscated class listed the same base class twice (a
    TypeError at class creation) and gave every method the same name; bases
    restored from the file's imports, method names from the helper names they
    call on `self.model_tester`.
    """

    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implement
        pass
def _long_tensor(tok_lst):
    """Build an int32 TF constant from a (nested) list of token ids.

    Name restored from the `_long_tensor(...)` call sites in the integration
    tests; `tf.intaa` was a mangling of `tf.int32`.
    """
    return tf.constant(tok_lst, dtype=tf.int32)
# Numeric tolerance for output comparisons.  NOTE(review): no visible code in
# this chunk reads this constant, so the (obfuscated) name is kept as-is.
_lowerCAmelCase :Union[str, Any] = 1E-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    """Slow integration tests for the pretrained `allenai/led-base-16384`.

    NOTE(review): renamed from the obfuscated `UpperCAmelCase`, which collided
    with the two classes above; method names restored.
    """

    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 179
| 1
|
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
# Root of the transformers package sources (upstream name: TRANSFORMERS_PATH).
_lowerCAmelCase : List[Any] = "src/transformers"
# Directory holding the English task-guide markdown files (upstream name:
# TASK_GUIDES_PATH).
# NOTE(review): both constants are bound to the same obfuscated name, so the
# first value is clobbered and later reads of TRANSFORMERS_PATH /
# TASK_GUIDES_PATH fail — confirm against upstream utils/check_task_guides.py.
_lowerCAmelCase : Optional[int] = "docs/source/en/tasks"
def __snake_case ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> str:
'''simple docstring'''
with open(A__ , "r" , encoding="utf-8" , newline="\n" ) as f:
_UpperCAmelCase : Dict = f.readlines()
# Find the start prompt.
_UpperCAmelCase : str = 0
while not lines[start_index].startswith(A__ ):
start_index += 1
start_index += 1
_UpperCAmelCase : Any = start_index
while not lines[end_index].startswith(A__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
# NOTE(review): module-level names in this section were destroyed by automated
# renaming — every constant is bound to ``_lowerCAmelCase`` (each assignment
# clobbering the previous one) while the functions below read
# ``TRANSFORMERS_PATH``, ``transformers_module``, ``TASK_GUIDE_TO_MODELS`` and
# ``SPECIAL_TASK_GUIDE_TO_MODEL_TYPES``.  Confirm against the upstream
# utils/check_task_guides.py before relying on this copy.
_lowerCAmelCase : Optional[int] = direct_transformers_import(TRANSFORMERS_PATH)
# Maps each task-guide markdown file to the auto-mapping that lists the model
# types supporting that task (upstream name: TASK_GUIDE_TO_MODELS).
_lowerCAmelCase : Any = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
# Upstream name: SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.
_lowerCAmelCase : List[str] = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}
def __snake_case ( SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[Any]:
    """Build the markdown list of model links for one task guide.

    Upstream this is ``get_model_list_for_task(task_guide)``: it joins
    ``[Name](../model_doc/code)`` links for every model type supporting the
    task, terminated by a newline.

    NOTE(review): the body reads ``TASK_GUIDE_TO_MODELS``, ``task_guide``,
    ``A__`` and ``model_maping_names`` — none of which are bound under those
    names in this copy (obfuscation damage); code left byte-identical.
    """
    _UpperCAmelCase : Any = TASK_GUIDE_TO_MODELS[task_guide]
    _UpperCAmelCase : str = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(A__ , set() )
    # Keep the model-code -> display-name pairs that belong to this task.
    _UpperCAmelCase : List[Any] = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f'[{name}](../model_doc/{code})' for code, name in model_names.items()] ) + "\n"
def __snake_case ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any]=False ) -> Union[str, Any]:
    """Check (and optionally rewrite) the auto-generated model list of a guide.

    Upstream this is ``check_model_list_for_task(task_guide, overwrite)``: it
    locates the auto-generated tip section, regenerates the model list, and
    either overwrites the file or raises asking for ``make fix-copies``.

    NOTE(review): both parameters share one duplicated obfuscated name (a
    SyntaxError) and the body reads ``A__``, ``current_list`` and the tuple
    unpack binds every element to the same name — obfuscation damage; code
    left byte-identical.
    """
    _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = _find_text_in_file(
        filename=os.path.join(A__ , A__ ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
    _UpperCAmelCase : Optional[Any] = get_model_list_for_task(A__ )
    if current_list != new_list:
        if overwrite:
            # Splice the regenerated list in place of the stale one.
            with open(os.path.join(A__ , A__ ) , "w" , encoding="utf-8" , newline="\n" ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
        else:
            raise ValueError(
                f'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
                " to fix this." )
if __name__ == "__main__":
    # CLI entry point: with --fix_and_overwrite the stale model lists are
    # rewritten in place, otherwise a ValueError reports each mismatch.
    # NOTE(review): ``parser``, ``args``, ``TASK_GUIDE_TO_MODELS`` and
    # ``check_model_list_for_task`` are not bound under these names in this
    # copy (obfuscation damage); code left byte-identical.
    _lowerCAmelCase : List[str] = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    _lowerCAmelCase : List[Any] = parser.parse_args()
    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 289
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
# Module logger (named after this module).
_lowerCamelCase : Dict = logging.get_logger(__name__)
# Canonical pretrained-config URLs for GPT-J checkpoints.
# NOTE(review): both constants are bound to the same obfuscated name, so the
# logger binding is clobbered by the dict below.
_lowerCamelCase : Optional[int] = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class SCREAMING_SNAKE_CASE ( _a ):
    """GPT-J model configuration (``model_type = "gptj"``).

    Fixes over the original: both class attributes were bound to one
    obfuscated name (so ``model_type`` was lost), every ``__init__``
    parameter shared one duplicated name (a SyntaxError), and all
    hyperparameters were assigned to a throwaway local instead of ``self``.
    Reconstructed to match the upstream ``GPTJConfig`` contract; defaults are
    the original literals in their original order.
    """

    model_type = "gptj"
    # Generic-name -> GPT-J-name aliases used by the base config class.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_400,
        n_positions=2_048,
        n_embd=4_096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs )
class SCREAMING_SNAKE_CASE ( _a ):
    """ONNX export configuration for GPT-J (``OnnxConfigWithPast`` subclass).

    Fixes over the original: the ``__init__`` parameters all shared one
    duplicated name (a SyntaxError), every method/property was named ``A``
    (each definition shadowing the previous one), and the pad-token default
    was assigned to a throwaway local instead of ``self._config``.
    Reconstructed per the upstream ``GPTJOnnxConfig``.
    """

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic-axis declaration for ONNX export; with past key/values the
        # attention mask covers past + current tokens.
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(SCREAMING_SNAKE_CASE, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # Extend the mask so it also covers the dummy past positions.
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 430
| 0
|
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCamelCase_ ( snake_case_ ):
    """Slow end-to-end check: fine-tune a tiny BERT2BERT encoder-decoder for
    summarization on 1% of CNN/DailyMail via ``Seq2SeqTrainer``.

    NOTE(review): the base class ``snake_case_`` and many locals read below
    (``bertabert``, ``tokenizer``, ``batch``, ``inputs``, ``outputs``,
    ``train_dataset``, ``val_dataset``, ``trainer`` …) are not bound under
    those names in this copy — obfuscation collapsed distinct locals into one
    name; code left byte-identical.
    """
    @slow
    @require_torch
    def __lowercase ( self : Any ):
        """Train for a few steps and assert nothing crashes."""
        SCREAMING_SNAKE_CASE : Optional[int] = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
        SCREAMING_SNAKE_CASE : str = BertTokenizer.from_pretrained('''bert-base-uncased''' )
        SCREAMING_SNAKE_CASE : int = bertabert.config.encoder.vocab_size
        SCREAMING_SNAKE_CASE : Dict = tokenizer.sep_token_id
        SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.cls_token_id
        SCREAMING_SNAKE_CASE : Any = 1_28
        SCREAMING_SNAKE_CASE : List[Any] = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
        SCREAMING_SNAKE_CASE : Any = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
        SCREAMING_SNAKE_CASE : Union[str, Any] = train_dataset.select(range(32 ) )
        SCREAMING_SNAKE_CASE : Any = val_dataset.select(range(16 ) )
        SCREAMING_SNAKE_CASE : Optional[Any] = 4
        def _map_to_encoder_decoder_inputs(lowerCAmelCase__ : str ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            SCREAMING_SNAKE_CASE : List[str] = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=lowerCAmelCase__ , max_length=5_12 )
            SCREAMING_SNAKE_CASE : Any = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=lowerCAmelCase__ , max_length=1_28 )
            SCREAMING_SNAKE_CASE : str = inputs.input_ids
            SCREAMING_SNAKE_CASE : Tuple = inputs.attention_mask
            SCREAMING_SNAKE_CASE : List[str] = outputs.input_ids
            SCREAMING_SNAKE_CASE : int = outputs.input_ids.copy()
            # Pad tokens are masked out of the loss with the ignore index -100.
            SCREAMING_SNAKE_CASE : List[str] = [
                [-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
            ]
            SCREAMING_SNAKE_CASE : Any = outputs.attention_mask
            assert all(len(lowerCAmelCase__ ) == 5_12 for x in inputs.input_ids )
            assert all(len(lowerCAmelCase__ ) == 1_28 for x in outputs.input_ids )
            return batch
        def _compute_metrics(lowerCAmelCase__ : Any ):
            # Exact-match accuracy between decoded predictions and labels.
            SCREAMING_SNAKE_CASE : List[str] = pred.label_ids
            SCREAMING_SNAKE_CASE : str = pred.predictions
            # all unnecessary tokens are removed
            SCREAMING_SNAKE_CASE : Dict = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
            SCREAMING_SNAKE_CASE : int = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
            SCREAMING_SNAKE_CASE : Optional[Any] = sum([int(pred_str[i] == label_str[i] ) for i in range(len(lowerCAmelCase__ ) )] ) / len(lowerCAmelCase__ )
            return {"accuracy": accuracy}
        # map train dataset
        SCREAMING_SNAKE_CASE : Tuple = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , remove_columns=['''article''', '''highlights'''] , )
        train_dataset.set_format(
            type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
        # same for validation dataset
        SCREAMING_SNAKE_CASE : List[str] = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , remove_columns=['''article''', '''highlights'''] , )
        val_dataset.set_format(
            type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
        SCREAMING_SNAKE_CASE : int = self.get_auto_remove_tmp_dir()
        SCREAMING_SNAKE_CASE : List[str] = SeqaSeqTrainingArguments(
            output_dir=lowerCAmelCase__ , per_device_train_batch_size=lowerCAmelCase__ , per_device_eval_batch_size=lowerCAmelCase__ , predict_with_generate=lowerCAmelCase__ , evaluation_strategy='''steps''' , do_train=lowerCAmelCase__ , do_eval=lowerCAmelCase__ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        SCREAMING_SNAKE_CASE : int = SeqaSeqTrainer(
            model=lowerCAmelCase__ , args=lowerCAmelCase__ , compute_metrics=_compute_metrics , train_dataset=lowerCAmelCase__ , eval_dataset=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , )
        # start training
        trainer.train()
| 464
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the LLaMA package: heavy submodules are imported
# only on first attribute access via ``_LazyModule``.
#
# Fixes over the original: each optional-dependency branch bound its name list
# to a fresh throwaway name instead of extending ``_import_structure`` — which
# the final ``_LazyModule(...)`` call reads (NameError) — and the lazy module
# was never installed into ``sys.modules``.
_import_structure = {
    'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_llama'] = ['LlamaTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_llama_fast'] = ['LlamaTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_llama'] = [
        'LlamaForCausalLM',
        'LlamaModel',
        'LlamaPreTrainedModel',
        'LlamaForSequenceClassification',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # Replace this module with the lazy proxy at runtime.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 464
| 1
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
# Generic item type for the stack.  The original bound the TypeVar to a
# throwaway name while ``Generic[T]`` below read ``T`` (NameError).
T = TypeVar("T")
_A = T  # preserve the original module-level alias


class Node(Generic[T]):
    """One link of the singly linked list backing :class:`Stack`.

    Fixes over the original: ``data``/``next`` were assigned to a throwaway
    local instead of ``self``, and the class name itself was destroyed while
    the stack still constructed ``Node(...)``.
    """

    def __init__(self, data: T) -> None:
        self.data = data
        self.next = None  # Node | None: link to the element below

    def __str__(self) -> str:
        return f"{self.data}"


class Stack(Generic[T]):
    """LIFO stack implemented as a singly linked list.

    Fixes over the original: all mutators were named ``__a`` (each definition
    shadowing the previous one) and every attribute write went to a throwaway
    local; restored to the conventional ``is_empty``/``push``/``pop``/
    ``peek``/``clear`` interface.
    """

    def __init__(self) -> None:
        self.top = None  # Node | None: most recently pushed element

    def __iter__(self) -> Iterator[T]:
        # Top-to-bottom traversal.
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        """Place *item* on top of the stack."""
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        """Remove and return the top item; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        """Return (without removing) the top item; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        """Drop every element."""
        self.top = None


# The original module's last class binding used this obfuscated name.
_lowerCAmelCase = Stack


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 290
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for the SEW package.
#
# Fixes over the original: the structure dict, the modeling additions and the
# final lazy module were all bound to one throwaway name, while
# ``_LazyModule(...)`` read an undefined ``_import_structure`` (NameError) and
# ``sys.modules`` was never updated.
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )
else:
    import sys

    # Replace this module with the lazy proxy at runtime.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 290
| 1
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase_ :
    """Builds tiny ViT configs and dummy inputs for the TF test suite
    (upstream: ``TFViTModelTester``).

    NOTE(review): every ``__init__`` parameter shares one duplicated
    obfuscated name (a SyntaxError) and many locals read below
    (``config``, ``pixel_values``, ``labels``, ``model``, ``result`` …) are
    never bound under those names — obfuscation damage; code left
    byte-identical.
    """
    def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=30 , UpperCamelCase_=2 , UpperCamelCase_=3 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=32 , UpperCamelCase_=2 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=10 , UpperCamelCase_=0.0_2 , UpperCamelCase_=3 , UpperCamelCase_=None , ) -> str:
        __lowercase : Optional[int] = parent
        __lowercase : List[str] = batch_size
        __lowercase : Dict = image_size
        __lowercase : Optional[int] = patch_size
        __lowercase : List[str] = num_channels
        __lowercase : Optional[Any] = is_training
        __lowercase : Optional[Any] = use_labels
        __lowercase : Any = hidden_size
        __lowercase : Dict = num_hidden_layers
        __lowercase : Union[str, Any] = num_attention_heads
        __lowercase : int = intermediate_size
        __lowercase : str = hidden_act
        __lowercase : Union[str, Any] = hidden_dropout_prob
        __lowercase : Optional[int] = attention_probs_dropout_prob
        __lowercase : Any = type_sequence_label_size
        __lowercase : Dict = initializer_range
        __lowercase : List[Any] = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        __lowercase : int = (image_size // patch_size) ** 2
        __lowercase : int = num_patches + 1
    def _lowerCamelCase ( self ) -> Tuple:
        # Random pixel values (and optional labels) plus a tiny config.
        __lowercase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __lowercase : List[Any] = None
        if self.use_labels:
            __lowercase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __lowercase : Any = self.get_config()
        return config, pixel_values, labels
    def _lowerCamelCase ( self ) -> int:
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
    def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
        # Bare-model forward pass, then again with a smaller image to check
        # positional-encoding interpolation.
        __lowercase : List[str] = TFViTModel(config=UpperCamelCase_ )
        __lowercase : Optional[Any] = model(UpperCamelCase_ , training=UpperCamelCase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image with different size than the one specified in config.
        __lowercase : Union[str, Any] = self.image_size // 2
        __lowercase : List[str] = pixel_values[:, :, :image_size, :image_size]
        __lowercase : str = model(UpperCamelCase_ , interpolate_pos_encoding=UpperCamelCase_ , training=UpperCamelCase_ )
        __lowercase : Dict = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
    def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> str:
        # Classification head: logits shape, interpolation, and greyscale input.
        __lowercase : Tuple = self.type_sequence_label_size
        __lowercase : int = TFViTForImageClassification(UpperCamelCase_ )
        __lowercase : Dict = model(UpperCamelCase_ , labels=UpperCamelCase_ , training=UpperCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image with different size than the one specified in config.
        __lowercase : List[Any] = self.image_size // 2
        __lowercase : Any = pixel_values[:, :, :image_size, :image_size]
        __lowercase : Optional[Any] = model(UpperCamelCase_ , interpolate_pos_encoding=UpperCamelCase_ , training=UpperCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        __lowercase : Dict = 1
        __lowercase : Union[str, Any] = TFViTForImageClassification(UpperCamelCase_ )
        __lowercase : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __lowercase : Optional[int] = model(UpperCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def _lowerCamelCase ( self ) -> int:
        # NOTE(review): the tuple unpack below binds everything to one name,
        # then ``config_and_inputs``/``pixel_values`` are read — damaged.
        __lowercase : Optional[Any] = self.prepare_config_and_inputs()
        __lowercase : int = config_and_inputs
        __lowercase : Tuple = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( snake_case , snake_case , unittest.TestCase ):
    """Common TF model tests specialised for ViT.

    NOTE(review): the five class attributes below are all bound to one
    obfuscated name (each clobbering the previous), and ``TFViTModelTester``
    is not defined under that name in this copy; code left byte-identical.
    """
    UpperCamelCase =(TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    UpperCamelCase =(
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    UpperCamelCase =False
    UpperCamelCase =False
    UpperCamelCase =False
    def _lowerCamelCase ( self ) -> Tuple:
        # Shared tester/config-tester fixtures.
        __lowercase : str = TFViTModelTester(self )
        __lowercase : Dict = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
    def _lowerCamelCase ( self ) -> Any:
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def _lowerCamelCase ( self ) -> Dict:
        pass
    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def _lowerCamelCase ( self ) -> Any:
        pass
    def _lowerCamelCase ( self ) -> str:
        # Input embeddings are a Keras layer; ViT has no output embeddings.
        __lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase : Optional[int] = model_class(UpperCamelCase_ )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            __lowercase : List[Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCamelCase_ , tf.keras.layers.Layer ) )
    def _lowerCamelCase ( self ) -> Any:
        # The forward signature must start with ``pixel_values``.
        __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase : Optional[Any] = model_class(UpperCamelCase_ )
            __lowercase : Tuple = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowercase : Any = [*signature.parameters.keys()]
            __lowercase : Tuple = ['pixel_values']
            self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
    def _lowerCamelCase ( self ) -> Optional[Any]:
        __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase_ )
    def _lowerCamelCase ( self ) -> Optional[Any]:
        __lowercase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
    @slow
    def _lowerCamelCase ( self ) -> Optional[Any]:
        # Smoke-test loading the public checkpoint.
        __lowercase : Optional[Any] = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
        self.assertIsNotNone(UpperCamelCase_ )
def __UpperCAmelCase ( ):
    """Load and return the COCO two-cats fixture image used by the tests.

    Fixes over the original: the image was bound to a throwaway obfuscated
    local while ``return image`` read an undefined name (NameError).
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
    """Slow TF ViT integration test against the public ImageNet checkpoint.

    NOTE(review): locals below are collapsed onto one obfuscated name while
    later lines read ``model``/``image``/``inputs``/``outputs`` —
    obfuscation damage; code left byte-identical.
    """
    @cached_property
    def _lowerCamelCase ( self ) -> Optional[Any]:
        # Image processor matching the checkpoint (None when PIL is missing).
        return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
    @slow
    def _lowerCamelCase ( self ) -> str:
        # End-to-end classification: logits shape (1, 1000) and a 3-value
        # corner compared against reference numbers.
        __lowercase : Optional[Any] = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
        __lowercase : List[Any] = self.default_image_processor
        __lowercase : Dict = prepare_img()
        __lowercase : int = image_processor(images=UpperCamelCase_ , return_tensors='''tf''' )
        # forward pass
        __lowercase : Optional[int] = model(**UpperCamelCase_ )
        # verify the logits
        __lowercase : Optional[Any] = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
        __lowercase : str = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] )
        tf.debugging.assert_near(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 )
| 704
|
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __UpperCAmelCase ( __UpperCamelCase ):
for param in module.parameters():
__lowercase : Tuple = False
def __UpperCAmelCase ( ):
__lowercase : Optional[int] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
__lowercase : List[Any] = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def __UpperCAmelCase ( __UpperCamelCase ):
    """Display *__UpperCamelCase* (an image array) with matplotlib, hiding both axes.

    Fixes over the original: the AxesImage handle was bound to a throwaway
    obfuscated local while the next lines read an undefined ``fig``, and the
    image argument was passed to ``set_visible`` where ``False`` was intended.
    """
    fig = plt.imshow(__UpperCamelCase )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def __UpperCAmelCase ( ):
__lowercase : Optional[Any] = datetime.now()
__lowercase : Optional[Any] = current_time.strftime('''%H:%M:%S''' )
return timestamp
| 523
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.