code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
A__ = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 82
|
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
snake_case_ : Any = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def A__ ( ):
_UpperCamelCase : Tuple = Github(os.environ['GITHUB_TOKEN'] )
_UpperCamelCase : List[Any] = g.get_repo('huggingface/diffusers' )
_UpperCamelCase : List[Any] = repo.get_issues(state='open' )
for issue in open_issues:
_UpperCamelCase : Dict = sorted(issue.get_comments() , key=lambda UpperCAmelCase_ : i.created_at , reverse=UpperCAmelCase_ )
_UpperCamelCase : List[str] = comments[0] if len(UpperCAmelCase_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
| 83
| 0
|
import math
def lowerCamelCase__ ( A__ : list , A__ : int ):
'''simple docstring'''
__lowerCamelCase = len(A__ )
__lowerCamelCase = int(math.floor(math.sqrt(A__ ) ) )
__lowerCamelCase = 0
while arr[min(A__ , A__ ) - 1] < x:
__lowerCamelCase = step
step += int(math.floor(math.sqrt(A__ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
__lowerCamelCase = prev + 1
if prev == min(A__ , A__ ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ = [int(item) for item in user_input.split(',')]
UpperCAmelCase_ = int(input('Enter the number to be searched:\n'))
UpperCAmelCase_ = jump_search(arr, x)
if res == -1:
print('Number not found!')
else:
print(f"""Number {x} is at index {res}""")
| 361
|
from math import ceil, sqrt
def lowerCamelCase__ ( A__ : int = 1000000 ):
'''simple docstring'''
__lowerCamelCase = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
__lowerCamelCase = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
__lowerCamelCase = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
| 29
| 0
|
def A_ ( snake_case : int = 4000000 ) -> int:
'''simple docstring'''
__UpperCamelCase = []
__UpperCamelCase , __UpperCamelCase = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(snake_case )
__UpperCamelCase , __UpperCamelCase = b, a + b
return sum(snake_case )
if __name__ == "__main__":
print(F"{solution() = }")
| 328
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowercase__ : Optional[Any] = logging.getLogger(__name__)
def A_ ( snake_case : Any=2 , snake_case : Union[str, Any]=3 , snake_case : Union[str, Any]=16 , snake_case : int = 10 , snake_case : int = 2 ) -> int:
'''simple docstring'''
def get_dataset(snake_case : Optional[int] ):
__UpperCamelCase = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(snake_case , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
__UpperCamelCase = get_dataset(snake_case )
__UpperCamelCase = get_dataset(snake_case )
__UpperCamelCase = DataLoader(snake_case , shuffle=snake_case , batch_size=snake_case , num_workers=4 )
__UpperCamelCase = DataLoader(snake_case , shuffle=snake_case , batch_size=snake_case , num_workers=4 )
return (train_dataloader, valid_dataloader)
def A_ ( snake_case : List[str] , snake_case : int , snake_case : List[str] , snake_case : Optional[int] , snake_case : int , snake_case : str=None ) -> Any:
'''simple docstring'''
__UpperCamelCase = []
for epoch in range(snake_case ):
# Train quickly
model.train()
for batch in dataloader:
__UpperCamelCase , __UpperCamelCase = batch
__UpperCamelCase = model(snake_case )
__UpperCamelCase = torch.nn.functional.mse_loss(snake_case , snake_case )
accelerator.backward(snake_case )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self )-> Tuple:
'''simple docstring'''
super().__init__()
__UpperCamelCase = nn.Parameter(torch.randn(1 ) )
__UpperCamelCase = nn.Parameter(torch.randn(1 ) )
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Dict:
'''simple docstring'''
return x * self.a + self.b
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self )-> Tuple:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(total_limit=1 , project_dir=SCREAMING_SNAKE_CASE_ , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
__UpperCamelCase = Accelerator(project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def A__ ( self )-> Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
# Train baseline
__UpperCamelCase = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
__UpperCamelCase = os.path.join(SCREAMING_SNAKE_CASE_ , '''initial''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
__UpperCamelCase = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
__UpperCamelCase = os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoint''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
# Load everything back in and make sure all states work
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
__UpperCamelCase = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Tuple:
'''simple docstring'''
__UpperCamelCase = torch.tensor([1, 2, 3] )
__UpperCamelCase = torch.tensor([2, 3, 4] )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(net.parameters() )
__UpperCamelCase = Accelerator()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as ve:
accelerator.register_for_checkpointing(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def A__ ( self )-> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase = torch.optim.lr_scheduler.StepLR(SCREAMING_SNAKE_CASE_ , step_size=1 , gamma=0.9_9 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
__UpperCamelCase = scheduler.state_dict()
train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
def A__ ( self )-> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ , total_limit=2 )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def A__ ( self )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if __name__ == "__main__":
lowercase__ : Optional[int] = "/tmp/accelerate/state_checkpointing"
lowercase__ : List[Any] = DummyModel()
lowercase__ : Tuple = torch.optim.Adam(params=model.parameters(), lr=1e-3)
lowercase__ : int = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
lowercase__ , lowercase__ : str = dummy_dataloaders()
lowercase__ : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
lowercase__ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
lowercase__ , lowercase__ : str = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
lowercase__ : int = group["params"][0].device
break
assert param_device.type == accelerator.device.type
lowercase__ : Union[str, Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
lowercase__ : Any = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
lowercase__ : List[Any] = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 328
| 1
|
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def lowerCAmelCase_( lowercase_ : Tuple ) -> str:
return EnvironmentCommand()
def lowerCAmelCase_( lowercase_ : Union[str, Any] ) -> str:
return EnvironmentCommand(args.accelerate_config_file )
class lowerCamelCase_( __lowercase ):
'''simple docstring'''
@staticmethod
def snake_case__ ( lowerCamelCase__ ):
_lowerCamelCase = parser.add_parser('''env''' )
download_parser.set_defaults(func=_a )
download_parser.add_argument(
'''--accelerate-config_file''' , default=_a , help='''The accelerate config file to use for the default values in the launching script.''' , )
download_parser.set_defaults(func=_a )
def __init__( self , lowerCamelCase__ , *lowerCamelCase__ ):
_lowerCamelCase = accelerate_config_file
def snake_case__ ( self ):
_lowerCamelCase = '''not installed'''
if is_safetensors_available():
import safetensors
_lowerCamelCase = safetensors.__version__
elif importlib.util.find_spec('''safetensors''' ) is not None:
import safetensors
_lowerCamelCase = F"""{safetensors.__version__} but is ignored because of PyTorch version too old."""
_lowerCamelCase = '''not installed'''
_lowerCamelCase = '''not found'''
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
_lowerCamelCase = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(_a ):
_lowerCamelCase = load_config_from_file(self._accelerate_config_file ).to_dict()
_lowerCamelCase = (
'''\n'''.join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
if isinstance(_a , _a )
else F"""\t{accelerate_config}"""
)
_lowerCamelCase = '''not installed'''
_lowerCamelCase = '''NA'''
if is_torch_available():
import torch
_lowerCamelCase = torch.__version__
_lowerCamelCase = torch.cuda.is_available()
_lowerCamelCase = '''not installed'''
_lowerCamelCase = '''NA'''
if is_tf_available():
import tensorflow as tf
_lowerCamelCase = tf.__version__
try:
# deprecated in v2.1
_lowerCamelCase = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
_lowerCamelCase = bool(tf.config.list_physical_devices('''GPU''' ) )
_lowerCamelCase = '''not installed'''
_lowerCamelCase = '''not installed'''
_lowerCamelCase = '''not installed'''
_lowerCamelCase = '''NA'''
if is_flax_available():
import flax
import jax
import jaxlib
_lowerCamelCase = flax.__version__
_lowerCamelCase = jax.__version__
_lowerCamelCase = jaxlib.__version__
_lowerCamelCase = jax.lib.xla_bridge.get_backend().platform
_lowerCamelCase = {
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': F"""{safetensors_version}""",
'''Accelerate version''': F"""{accelerate_version}""",
'''Accelerate config''': F"""{accelerate_config_str}""",
'''PyTorch version (GPU?)''': F"""{pt_version} ({pt_cuda_available})""",
'''Tensorflow version (GPU?)''': F"""{tf_version} ({tf_cuda_available})""",
'''Flax version (CPU?/GPU?/TPU?)''': F"""{flax_version} ({jax_backend})""",
'''Jax version''': F"""{jax_version}""",
'''JaxLib version''': F"""{jaxlib_version}""",
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(_a ) )
return info
@staticmethod
def snake_case__ ( lowerCamelCase__ ):
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 365
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Dict = {
'''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : Optional[Any] = 'poolformer'
def __init__( self , lowerCamelCase__=3 , lowerCamelCase__=1_6 , lowerCamelCase__=1_6 , lowerCamelCase__=3 , lowerCamelCase__=4.0 , lowerCamelCase__=[2, 2, 6, 2] , lowerCamelCase__=[6_4, 1_2_8, 3_2_0, 5_1_2] , lowerCamelCase__=[7, 3, 3, 3] , lowerCamelCase__=[4, 2, 2, 2] , lowerCamelCase__=[2, 1, 1, 1] , lowerCamelCase__=4 , lowerCamelCase__=0.0 , lowerCamelCase__="gelu" , lowerCamelCase__=True , lowerCamelCase__=1e-5 , lowerCamelCase__=0.0_2 , **lowerCamelCase__ , ):
_lowerCamelCase = num_channels
_lowerCamelCase = patch_size
_lowerCamelCase = stride
_lowerCamelCase = padding
_lowerCamelCase = pool_size
_lowerCamelCase = hidden_sizes
_lowerCamelCase = mlp_ratio
_lowerCamelCase = depths
_lowerCamelCase = patch_sizes
_lowerCamelCase = strides
_lowerCamelCase = num_encoder_blocks
_lowerCamelCase = drop_path_rate
_lowerCamelCase = hidden_act
_lowerCamelCase = use_layer_scale
_lowerCamelCase = layer_scale_init_value
_lowerCamelCase = initializer_range
super().__init__(**lowerCamelCase__ )
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : str = version.parse('1.11' )
@property
def snake_case__ ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def snake_case__ ( self ):
return 2e-3
| 73
| 0
|
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
SCREAMING_SNAKE_CASE__ = generate_large_matrix()
SCREAMING_SNAKE_CASE__ = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> str:
assert all(row == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for row in grid )
assert all(list(__lowerCAmelCase ) == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for col in zip(*__lowerCAmelCase ) )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict:
__lowercase = 0
__lowercase = len(__lowerCAmelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__lowercase = (left + right) // 2
__lowercase = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__lowercase = mid + 1
else:
__lowercase = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(__lowerCAmelCase )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]:
__lowercase = 0
__lowercase = len(grid[0] )
for i in range(len(__lowerCAmelCase ) ):
__lowercase = find_negative_index(grid[i][:bound] )
total += bound
return (len(__lowerCAmelCase ) * len(grid[0] )) - total
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict:
return len([number for row in grid for number in row if number < 0] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]:
__lowercase = 0
for row in grid:
for i, number in enumerate(__lowerCAmelCase ):
if number < 0:
total += len(__lowerCAmelCase ) - i
break
return total
def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
from timeit import timeit
print('Running benchmarks' )
__lowercase = (
"from __main__ import count_negatives_binary_search, "
"count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__lowercase = timeit(F"""{func}(grid=grid)""" , setup=__lowerCAmelCase , number=500 )
print(F"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 325
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase__ = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
lowerCamelCase__ = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
lowerCamelCase__ = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
lowerCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : Tuple = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : Any = BertTokenizer
def __init__( self : int , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : str=True , lowerCamelCase__ : Tuple="[UNK]" , lowerCamelCase__ : str="[SEP]" , lowerCamelCase__ : Optional[Any]="[PAD]" , lowerCamelCase__ : List[str]="[CLS]" , lowerCamelCase__ : Union[str, Any]="[MASK]" , lowerCamelCase__ : str=True , lowerCamelCase__ : Dict=None , **lowerCamelCase__ : Union[str, Any] , ) ->Tuple:
'''simple docstring'''
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , tokenize_chinese_chars=lowerCamelCase__ , strip_accents=lowerCamelCase__ , **lowerCamelCase__ , )
_UpperCAmelCase : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCamelCase__ ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCamelCase__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCamelCase__ ) != tokenize_chinese_chars
):
_UpperCAmelCase : str = getattr(lowerCamelCase__ , normalizer_state.pop("type" ) )
_UpperCAmelCase : Optional[Any] = do_lower_case
_UpperCAmelCase : Any = strip_accents
_UpperCAmelCase : List[Any] = tokenize_chinese_chars
_UpperCAmelCase : int = normalizer_class(**lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = do_lower_case
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : str=None ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
_UpperCAmelCase : Tuple = [self.sep_token_id]
_UpperCAmelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ) ->Tuple[str]:
'''simple docstring'''
_UpperCAmelCase : List[str] = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
| 234
| 0
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
A : List[Any] = logging.get_logger(__name__)
A : str = {
'tensor(bool)': np.bool_,
'tensor(int8)': np.inta,
'tensor(uint8)': np.uinta,
'tensor(int16)': np.intaa,
'tensor(uint16)': np.uintaa,
'tensor(int32)': np.intaa,
'tensor(uint32)': np.uintaa,
'tensor(int64)': np.intaa,
'tensor(uint64)': np.uintaa,
'tensor(float16)': np.floataa,
'tensor(float)': np.floataa,
'tensor(double)': np.floataa,
}
class A :
'''simple docstring'''
def __init__(self : Optional[int] , _UpperCAmelCase : Any=None , **_UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
lowercase__ = model
lowercase__ = kwargs.get("""model_save_dir""" , _UpperCAmelCase )
lowercase__ = kwargs.get("""latest_model_name""" , _UpperCAmelCase )
def __call__(self : int , **_UpperCAmelCase : Optional[int] ) -> str:
"""simple docstring"""
lowercase__ = {k: np.array(_UpperCAmelCase ) for k, v in kwargs.items()}
return self.model.run(_UpperCAmelCase , _UpperCAmelCase )
@staticmethod
def lowerCamelCase__ (_UpperCAmelCase : Union[str, Path] , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : List[Any]=None ) -> Optional[int]:
"""simple docstring"""
if provider is None:
logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
lowercase__ = """CPUExecutionProvider"""
return ort.InferenceSession(_UpperCAmelCase , providers=[provider] , sess_options=_UpperCAmelCase )
def lowerCamelCase__ (self : str , _UpperCAmelCase : Union[str, Path] , _UpperCAmelCase : Optional[str] = None , **_UpperCAmelCase : Any ) -> List[str]:
"""simple docstring"""
lowercase__ = file_name if file_name is not None else ONNX_WEIGHTS_NAME
lowercase__ = self.model_save_dir.joinpath(self.latest_model_name )
lowercase__ = Path(_UpperCAmelCase ).joinpath(_UpperCAmelCase )
try:
shutil.copyfile(_UpperCAmelCase , _UpperCAmelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
lowercase__ = self.model_save_dir.joinpath(_UpperCAmelCase )
if src_path.exists():
lowercase__ = Path(_UpperCAmelCase ).joinpath(_UpperCAmelCase )
try:
shutil.copyfile(_UpperCAmelCase , _UpperCAmelCase )
except shutil.SameFileError:
pass
def lowerCamelCase__ (self : Union[str, Any] , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : int , ) -> Optional[int]:
"""simple docstring"""
if os.path.isfile(_UpperCAmelCase ):
logger.error(f'''Provided path ({save_directory}) should be a directory, not a file''' )
return
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
# saving model weights/files
self._save_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
@classmethod
def lowerCamelCase__ (cls : List[Any] , _UpperCAmelCase : Union[str, Path] , _UpperCAmelCase : Optional[Union[bool, str, None]] = None , _UpperCAmelCase : Optional[Union[str, None]] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional["ort.SessionOptions"] = None , **_UpperCAmelCase : Union[str, Any] , ) -> int:
"""simple docstring"""
lowercase__ = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_UpperCAmelCase ):
lowercase__ = OnnxRuntimeModel.load_model(
os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , provider=_UpperCAmelCase , sess_options=_UpperCAmelCase )
lowercase__ = Path(_UpperCAmelCase )
# load model from hub
else:
# download model
lowercase__ = hf_hub_download(
repo_id=_UpperCAmelCase , filename=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , revision=_UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , )
lowercase__ = Path(_UpperCAmelCase ).parent
lowercase__ = Path(_UpperCAmelCase ).name
lowercase__ = OnnxRuntimeModel.load_model(_UpperCAmelCase , provider=_UpperCAmelCase , sess_options=_UpperCAmelCase )
return cls(model=_UpperCAmelCase , **_UpperCAmelCase )
@classmethod
def lowerCamelCase__ (cls : int , _UpperCAmelCase : Union[str, Path] , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional[str] = None , **_UpperCAmelCase : Optional[int] , ) -> str:
"""simple docstring"""
lowercase__ = None
if len(str(_UpperCAmelCase ).split("""@""" ) ) == 2:
lowercase__ , lowercase__ = model_id.split("""@""" )
return cls._from_pretrained(
model_id=_UpperCAmelCase , revision=_UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , **_UpperCAmelCase , )
| 350
|
def UpperCamelCase ( __magic_name__ : str ) -> List[str]: # noqa: E741
"""simple docstring"""
lowercase__ = len(__magic_name__ )
lowercase__ = 0
lowercase__ = [0] * n
lowercase__ = [False] * n
lowercase__ = [False] * n
def dfs(__magic_name__ : str , __magic_name__ : List[str] , __magic_name__ : str , __magic_name__ : Any ):
if parent == root:
out_edge_count += 1
lowercase__ = True
lowercase__ = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
lowercase__ = dfs(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
lowercase__ = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
lowercase__ = True
# AP found via cycle
if at == low[to]:
lowercase__ = True
else:
lowercase__ = min(low[at] , __magic_name__ )
return out_edge_count
for i in range(__magic_name__ ):
if not visited[i]:
lowercase__ = 0
lowercase__ = dfs(__magic_name__ , __magic_name__ , -1 , __magic_name__ )
lowercase__ = out_edge_count > 1
for x in range(len(__magic_name__ ) ):
if is_art[x] is True:
print(__magic_name__ )
# Adjacency list of graph
A : List[str] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 146
| 0
|
def _a ( SCREAMING_SNAKE_CASE : str ) -> bool:
"""simple docstring"""
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
__lowerCAmelCase: Tuple = sorted(string.lower() )
return len(SCREAMING_SNAKE_CASE ) == len(set(SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
_a = input('''Enter a string ''').strip()
_a = is_isogram(input_str)
print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 322
|
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class A_ ( unittest.TestCase ):
def __init__( self : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any]=1_3 , UpperCAmelCase : Optional[int]=7 , UpperCAmelCase : Tuple=True , UpperCAmelCase : str=True , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=9_9 , UpperCAmelCase : Optional[int]=3_2 , UpperCAmelCase : Dict=5 , UpperCAmelCase : int=4 , UpperCAmelCase : Optional[Any]=3_7 , UpperCAmelCase : List[str]="gelu" , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : str=5_1_2 , UpperCAmelCase : Dict=1_6 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : int=0.02 , UpperCAmelCase : List[Any]=4 , ) -> Optional[Any]:
__lowerCAmelCase: str = parent
__lowerCAmelCase: Dict = batch_size
__lowerCAmelCase: Optional[int] = seq_length
__lowerCAmelCase: Dict = is_training
__lowerCAmelCase: Optional[Any] = use_attention_mask
__lowerCAmelCase: List[Any] = use_token_type_ids
__lowerCAmelCase: Optional[int] = use_labels
__lowerCAmelCase: Optional[Any] = vocab_size
__lowerCAmelCase: Optional[Any] = hidden_size
__lowerCAmelCase: Tuple = num_hidden_layers
__lowerCAmelCase: List[str] = num_attention_heads
__lowerCAmelCase: int = intermediate_size
__lowerCAmelCase: Union[str, Any] = hidden_act
__lowerCAmelCase: List[Any] = hidden_dropout_prob
__lowerCAmelCase: List[str] = attention_probs_dropout_prob
__lowerCAmelCase: Optional[int] = max_position_embeddings
__lowerCAmelCase: Union[str, Any] = type_vocab_size
__lowerCAmelCase: int = type_sequence_label_size
__lowerCAmelCase: Union[str, Any] = initializer_range
__lowerCAmelCase: Any = num_choices
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase: List[Any] = None
if self.use_attention_mask:
__lowerCAmelCase: List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase: Optional[Any] = None
if self.use_token_type_ids:
__lowerCAmelCase: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase: Optional[int] = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase ( self : Dict ) -> Any:
__lowerCAmelCase: Optional[int] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: Optional[Any] = config_and_inputs
__lowerCAmelCase: Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class A_ ( snake_case__ , unittest.TestCase ):
_lowercase : Dict = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase: List[Any] = FlaxAlbertModelTester(self )
@slow
def UpperCAmelCase ( self : Tuple ) -> Dict:
for model_class_name in self.all_model_classes:
__lowerCAmelCase: Optional[Any] = model_class_name.from_pretrained('albert-base-v2' )
__lowerCAmelCase: Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase )
@require_flax
class A_ ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self : Any ) -> Any:
__lowerCAmelCase: List[Any] = FlaxAlbertModel.from_pretrained('albert-base-v2' )
__lowerCAmelCase: Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
__lowerCAmelCase: Tuple = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__lowerCAmelCase: Tuple = model(UpperCAmelCase , attention_mask=UpperCAmelCase )[0]
__lowerCAmelCase: str = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , UpperCAmelCase )
__lowerCAmelCase: List[str] = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCAmelCase , atol=1E-4 ) )
| 322
| 1
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__a = datasets.utils.logging.get_logger(__name__)
__a = ['names', 'prefix']
__a = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
__a = ['encoding_errors', 'on_bad_lines']
__a = ['date_format']
@dataclass
class A__ ( datasets.BuilderConfig ):
"""simple docstring"""
UpperCamelCase_ : str = ","
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : Optional[Union[int, List[int], str]] = "infer"
UpperCamelCase_ : Optional[List[str]] = None
UpperCamelCase_ : Optional[List[str]] = None
UpperCamelCase_ : Optional[Union[int, str, List[int], List[str]]] = None
UpperCamelCase_ : Optional[Union[List[int], List[str]]] = None
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : bool = True
UpperCamelCase_ : Optional[Literal["c", "python", "pyarrow"]] = None
UpperCamelCase_ : Dict[Union[int, str], Callable[[Any], Any]] = None
UpperCamelCase_ : Optional[list] = None
UpperCamelCase_ : Optional[list] = None
UpperCamelCase_ : bool = False
UpperCamelCase_ : Optional[Union[int, List[int]]] = None
UpperCamelCase_ : Optional[int] = None
UpperCamelCase_ : Optional[Union[str, List[str]]] = None
UpperCamelCase_ : bool = True
UpperCamelCase_ : bool = True
UpperCamelCase_ : bool = False
UpperCamelCase_ : bool = True
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : str = "."
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : str = '"'
UpperCamelCase_ : int = 0
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : bool = True
UpperCamelCase_ : bool = True
UpperCamelCase_ : int = 0
UpperCamelCase_ : bool = True
UpperCamelCase_ : bool = False
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : int = 1_00_00
UpperCamelCase_ : Optional[datasets.Features] = None
UpperCamelCase_ : Optional[str] = "strict"
UpperCamelCase_ : Literal["error", "warn", "skip"] = "error"
UpperCamelCase_ : Optional[str] = None
def _lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
if self.delimiter is not None:
_UpperCAmelCase : Any = self.delimiter
if self.column_names is not None:
_UpperCAmelCase : List[Any] = self.column_names
@property
def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Dict = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class A__ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
UpperCamelCase_ : int = CsvConfig
def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : str ) -> List[str]:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_UpperCAmelCase : List[str] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase__ , (str, list, tuple) ):
_UpperCAmelCase : int = data_files
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : Any = [files]
_UpperCAmelCase : List[Any] = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
_UpperCAmelCase : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : str = [files]
_UpperCAmelCase : Any = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) )
return splits
def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : pa.Table ) -> pa.Table:
"""simple docstring"""
if self.config.features is not None:
_UpperCAmelCase : Tuple = self.config.features.arrow_schema
if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ):
# cheaper cast
_UpperCAmelCase : Any = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
_UpperCAmelCase : int = table_cast(lowerCAmelCase__ , lowerCAmelCase__ )
return pa_table
def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : Dict ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : int = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
_UpperCAmelCase : Optional[Any] = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
_UpperCAmelCase : Optional[Any] = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowerCAmelCase__ ):
_UpperCAmelCase : Optional[int] = pa.Table.from_pandas(lowerCAmelCase__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(lowerCAmelCase__ )}: {e}""" )
raise
| 366
|
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__a = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any]=7 , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : List[Any]=1_8 , lowerCAmelCase__ : str=3_0 , lowerCAmelCase__ : str=4_0_0 , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[Any]=None , ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = size if size is not None else {"height": 2_0, "width": 2_0}
_UpperCAmelCase : Optional[Any] = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : str = num_channels
_UpperCAmelCase : Optional[Any] = image_size
_UpperCAmelCase : Dict = min_resolution
_UpperCAmelCase : str = max_resolution
_UpperCAmelCase : List[Any] = size
_UpperCAmelCase : Union[str, Any] = do_normalize
_UpperCAmelCase : Optional[Any] = do_convert_rgb
_UpperCAmelCase : str = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
_UpperCAmelCase : str = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6}
def _lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def _lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
_UpperCAmelCase : Dict = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
_UpperCAmelCase : Optional[Any] = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class A__ ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Any = PixaStructImageProcessor if is_vision_available() else None
def _lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Tuple = PixaStructImageProcessingTester(self )
@property
def _lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_convert_rgb" ) )
def _lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = self.image_processor_tester.prepare_dummy_image()
_UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
_UpperCAmelCase : str = 2_0_4_8
_UpperCAmelCase : Any = image_processor(lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def _lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
_UpperCAmelCase : List[str] = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_UpperCAmelCase : Union[str, Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_UpperCAmelCase : str = image_processor(
lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
_UpperCAmelCase : Union[str, Any] = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
_UpperCAmelCase : str = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(lowerCAmelCase__ ):
_UpperCAmelCase : str = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
_UpperCAmelCase : Any = "Hello"
_UpperCAmelCase : Optional[int] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ , header_text=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_UpperCAmelCase : List[Any] = image_processor(
lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ , header_text=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
_UpperCAmelCase : Any = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_UpperCAmelCase : int = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_UpperCAmelCase : Union[str, Any] = image_processor(
lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
_UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
_UpperCAmelCase : List[str] = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_UpperCAmelCase : Union[str, Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_UpperCAmelCase : str = image_processor(
lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class A__ ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : List[Any] = PixaStructImageProcessor if is_vision_available() else None
def _lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Any = PixaStructImageProcessingTester(self , num_channels=4 )
_UpperCAmelCase : List[Any] = 3
@property
def _lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
_UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_convert_rgb" ) )
def _lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
_UpperCAmelCase : str = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_UpperCAmelCase : Any = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_UpperCAmelCase : Tuple = image_processor(
lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 17
| 0
|
"""simple docstring"""
from math import pi, sqrt
def lowercase_ ( _snake_case ):
if num <= 0:
raise ValueError("""math domain error""" )
if num > 171.5:
raise OverflowError("""math range error""" )
elif num - int(_snake_case ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
return sqrt(_snake_case )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def lowercase_ ( ):
assert gamma(0.5 ) == sqrt(_snake_case )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase__ : Dict = 1.0
while num:
UpperCAmelCase__ : List[Any] = float(input('Gamma of: '))
print(f"""gamma({num}) = {gamma(num)}""")
print('\nEnter 0 to exit...')
| 25
|
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCAmelCase__ : List[str] = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCAmelCase__ : List[Any] = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = SavedModel()
SCREAMING_SNAKE_CASE__ : Dict = []
with open(os.path.join(_snake_case ,"""utils""" ,"""tf_ops""" ,"""onnx.json""" ) ) as f:
SCREAMING_SNAKE_CASE__ : Any = json.load(_snake_case )["""opsets"""]
for i in range(1 ,opset + 1 ):
onnx_ops.extend(onnx_opsets[str(_snake_case )] )
with open(_snake_case ,"""rb""" ) as f:
saved_model.ParseFromString(f.read() )
SCREAMING_SNAKE_CASE__ : List[str] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
SCREAMING_SNAKE_CASE__ : int = sorted(_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(_snake_case )
if strict and len(_snake_case ) > 0:
raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + incompatible_ops )
elif len(_snake_case ) > 0:
print(f'''Found the following incompatible ops for the opset {opset}:''' )
print(*_snake_case ,sep="""\n""" )
else:
print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
UpperCAmelCase__ : Dict = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 25
| 1
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
lowercase_ = 5
lowercase_ = 10
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Dict = SpeechaTextTokenizer
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : str = True
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Dict:
"""simple docstring"""
super().setUp()
lowercase__ = sp.SentencePieceProcessor()
spm_model.Load(a )
lowercase__ = ['<s>', '<pad>', '</s>', '<unk>']
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(a ) )]
lowercase__ = dict(zip(a , range(len(a ) ) ) )
lowercase__ = Path(self.tmpdirname )
save_json(a , save_dir / VOCAB_FILES_NAMES['vocab_file'] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(a , save_dir / VOCAB_FILES_NAMES['spm_file'] )
lowercase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Any:
"""simple docstring"""
lowercase__ = '<pad>'
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Tuple:
"""simple docstring"""
lowercase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(a ) , 1_001 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_001 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[str]:
"""simple docstring"""
lowercase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
lowercase__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(a , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [289, 50, 14, 174, 386] , )
lowercase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
a , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
lowercase__ = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(a , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
lowercase__ = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[Any]:
"""simple docstring"""
lowercase__ = {'input_ids': [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
_UpperCamelCase : Tuple = 'valhalla/s2t_mustc_multilinguial_medium'
_UpperCamelCase : List[Any] = 'C\'est trop cool'
_UpperCamelCase : Optional[Any] = 'Esto es genial'
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict )-> Optional[Any]:
"""simple docstring"""
lowercase__ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[str]:
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11 )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> List[Any]:
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size , 10_000 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Dict:
"""simple docstring"""
self.assertIn(a , self.tokenizer.all_special_ids )
lowercase__ = [ES_CODE, 4, 1_601, 47, 7_647, 2]
lowercase__ = self.tokenizer.decode(a , skip_special_tokens=a )
lowercase__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a )
self.assertEqual(a , a )
self.assertNotIn(self.tokenizer.eos_token , a )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Dict:
"""simple docstring"""
lowercase__ = 'fr'
lowercase__ = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , a )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> List[Any]:
"""simple docstring"""
lowercase__ = 'fr'
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
lowercase__ = 'es'
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 269
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowercase_ = logging.get_logger(__name__)
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple[int, int]:
def constraint_to_multiple_of(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=None ):
lowercase__ = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
lowercase__ = math.floor(val / multiple ) * multiple
if x < min_val:
lowercase__ = math.ceil(val / multiple ) * multiple
return x
lowercase__ = (output_size, output_size) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else output_size
lowercase__ , lowercase__ = get_image_size(_SCREAMING_SNAKE_CASE )
lowercase__ , lowercase__ = output_size
# determine new height and width
lowercase__ = output_height / input_height
lowercase__ = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
lowercase__ = scale_width
else:
# fit height
lowercase__ = scale_height
lowercase__ = constraint_to_multiple_of(scale_height * input_height , multiple=_SCREAMING_SNAKE_CASE )
lowercase__ = constraint_to_multiple_of(scale_width * input_width , multiple=_SCREAMING_SNAKE_CASE )
return (new_height, new_width)
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Dict = ['pixel_values']
def __init__( self : Any , a : bool = True , a : Dict[str, int] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = False , a : int = 1 , a : bool = True , a : Union[int, float] = 1 / 255 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : Tuple , )-> None:
"""simple docstring"""
super().__init__(**a )
lowercase__ = size if size is not None else {'height': 384, 'width': 384}
lowercase__ = get_size_dict(a )
lowercase__ = do_resize
lowercase__ = size
lowercase__ = keep_aspect_ratio
lowercase__ = ensure_multiple_of
lowercase__ = resample
lowercase__ = do_rescale
lowercase__ = rescale_factor
lowercase__ = do_normalize
lowercase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : np.ndarray , a : Dict[str, int] , a : bool = False , a : int = 1 , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[Any] , )-> np.ndarray:
"""simple docstring"""
lowercase__ = get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
lowercase__ = get_resize_output_image_size(
a , output_size=(size['height'], size['width']) , keep_aspect_ratio=a , multiple=a , )
return resize(a , size=a , resample=a , data_format=a , **a )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : np.ndarray , a : Union[int, float] , a : Optional[Union[str, ChannelDimension]] = None , **a : Dict , )-> str:
"""simple docstring"""
return rescale(a , scale=a , data_format=a , **a )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[int] , )-> np.ndarray:
"""simple docstring"""
return normalize(a , mean=a , std=a , data_format=a , **a )
def SCREAMING_SNAKE_CASE_ ( self : int , a : ImageInput , a : bool = None , a : int = None , a : bool = None , a : int = None , a : PILImageResampling = None , a : bool = None , a : float = None , a : bool = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : ChannelDimension = ChannelDimension.FIRST , **a : str , )-> PIL.Image.Image:
"""simple docstring"""
lowercase__ = do_resize if do_resize is not None else self.do_resize
lowercase__ = size if size is not None else self.size
lowercase__ = get_size_dict(a )
lowercase__ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
lowercase__ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
lowercase__ = resample if resample is not None else self.resample
lowercase__ = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ = image_mean if image_mean is not None else self.image_mean
lowercase__ = image_std if image_std is not None else self.image_std
lowercase__ = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowercase__ = [to_numpy_array(a ) for image in images]
if do_resize:
lowercase__ = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_rescale:
lowercase__ = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
lowercase__ = [self.normalize(image=a , mean=a , std=a ) for image in images]
lowercase__ = [to_channel_dimension_format(a , a ) for image in images]
lowercase__ = {'pixel_values': images}
return BatchFeature(data=a , tensor_type=a )
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : str , a : List[Tuple] = None )-> Optional[int]:
"""simple docstring"""
lowercase__ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(a ) != len(a ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(a ):
lowercase__ = target_sizes.numpy()
lowercase__ = []
for idx in range(len(a ) ):
lowercase__ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=a )
lowercase__ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(a )
else:
lowercase__ = logits.argmax(dim=1 )
lowercase__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 269
| 1
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class A__ :
def __init__( self , __magic_name__ = None ):
if components is None:
lowerCamelCase : Optional[Any] = []
lowerCamelCase : List[str] = list(__magic_name__ )
def __len__( self ):
return len(self.__components )
def __str__( self ):
return "(" + ",".join(map(__magic_name__ , self.__components ) ) + ")"
def __add__( self , __magic_name__ ):
lowerCamelCase : Any = len(self )
if size == len(__magic_name__ ):
lowerCamelCase : Any = [self.__components[i] + other.component(__magic_name__ ) for i in range(__magic_name__ )]
return Vector(__magic_name__ )
else:
raise Exception("""must have the same size""" )
def __sub__( self , __magic_name__ ):
lowerCamelCase : List[str] = len(self )
if size == len(__magic_name__ ):
lowerCamelCase : str = [self.__components[i] - other.component(__magic_name__ ) for i in range(__magic_name__ )]
return Vector(__magic_name__ )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self , __magic_name__ ):
...
@overload
def __mul__( self , __magic_name__ ):
...
def __mul__( self , __magic_name__ ):
if isinstance(__magic_name__ , (float, int) ):
lowerCamelCase : Any = [c * other for c in self.__components]
return Vector(__magic_name__ )
elif isinstance(__magic_name__ , __magic_name__ ) and len(self ) == len(__magic_name__ ):
lowerCamelCase : List[Any] = len(self )
lowerCamelCase : Optional[int] = [self.__components[i] * other.component(__magic_name__ ) for i in range(__magic_name__ )]
return sum(__magic_name__ )
else: # error case
raise Exception("""invalid operand!""" )
def UpperCamelCase__ ( self ):
return Vector(self.__components )
def UpperCamelCase__ ( self , __magic_name__ ):
if isinstance(__magic_name__ , __magic_name__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
assert -len(self.__components ) <= pos < len(self.__components )
lowerCamelCase : Union[str, Any] = value
def UpperCamelCase__ ( self ):
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
lowerCamelCase : Union[str, Any] = [c**2 for c in self.__components]
return math.sqrt(sum(__magic_name__ ) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = False ):
lowerCamelCase : Tuple = self * other
lowerCamelCase : List[Any] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def _a ( lowerCamelCase ):
assert isinstance(lowerCamelCase, lowerCamelCase )
return Vector([0] * dimension )
def _a ( lowerCamelCase, lowerCamelCase ):
assert isinstance(lowerCamelCase, lowerCamelCase ) and (isinstance(lowerCamelCase, lowerCamelCase ))
lowerCamelCase : Tuple = [0] * dimension
lowerCamelCase : Tuple = 1
return Vector(lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
assert (
isinstance(lowerCamelCase, lowerCamelCase )
and isinstance(lowerCamelCase, lowerCamelCase )
and (isinstance(lowerCamelCase, (int, float) ))
)
return x * scalar + y
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
random.seed(lowerCamelCase )
lowerCamelCase : str = [random.randint(lowerCamelCase, lowerCamelCase ) for _ in range(lowerCamelCase )]
return Vector(lowerCamelCase )
class A__ :
def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Union[str, Any] = matrix
lowerCamelCase : Optional[Any] = w
lowerCamelCase : List[Any] = h
def __str__( self ):
lowerCamelCase : Dict = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , __magic_name__ ):
if self.__width == other.width() and self.__height == other.height():
lowerCamelCase : Dict = []
for i in range(self.__height ):
lowerCamelCase : List[Any] = [
self.__matrix[i][j] + other.component(__magic_name__ , __magic_name__ )
for j in range(self.__width )
]
matrix.append(__magic_name__ )
return Matrix(__magic_name__ , self.__width , self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__( self , __magic_name__ ):
if self.__width == other.width() and self.__height == other.height():
lowerCamelCase : Optional[int] = []
for i in range(self.__height ):
lowerCamelCase : Optional[Any] = [
self.__matrix[i][j] - other.component(__magic_name__ , __magic_name__ )
for j in range(self.__width )
]
matrix.append(__magic_name__ )
return Matrix(__magic_name__ , self.__width , self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self , __magic_name__ ):
...
@overload
def __mul__( self , __magic_name__ ):
...
def __mul__( self , __magic_name__ ):
if isinstance(__magic_name__ , __magic_name__ ): # matrix-vector
if len(__magic_name__ ) == self.__width:
lowerCamelCase : Tuple = zero_vector(self.__height )
for i in range(self.__height ):
lowerCamelCase : Union[str, Any] = [
self.__matrix[i][j] * other.component(__magic_name__ )
for j in range(self.__width )
]
ans.change_component(__magic_name__ , sum(__magic_name__ ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(__magic_name__ , (int, float) ): # matrix-scalar
lowerCamelCase : Union[str, Any] = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(__magic_name__ , self.__width , self.__height )
return None
def UpperCamelCase__ ( self ):
return self.__height
def UpperCamelCase__ ( self ):
return self.__width
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
if 0 <= x < self.__height and 0 <= y < self.__width:
lowerCamelCase : Tuple = value
else:
raise Exception("""change_component: indices out of bounds""" )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
lowerCamelCase : str = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(__magic_name__ ) ):
lowerCamelCase : List[str] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(__magic_name__ , self.__width - 1 , self.__height - 1 ).determinant()
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(__magic_name__ , __magic_name__ )
else:
raise Exception("""Indices out of bounds""" )
def UpperCamelCase__ ( self ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
lowerCamelCase : Tuple = [
self.__matrix[0][y] * self.cofactor(0 , __magic_name__ ) for y in range(self.__width )
]
return sum(__magic_name__ )
def _a ( lowerCamelCase ):
lowerCamelCase : list[list[float]] = [[0] * n for _ in range(lowerCamelCase )]
return Matrix(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
random.seed(lowerCamelCase )
lowerCamelCase : list[list[float]] = [
[random.randint(lowerCamelCase, lowerCamelCase ) for _ in range(lowerCamelCase )] for _ in range(lowerCamelCase )
]
return Matrix(lowerCamelCase, lowerCamelCase, lowerCamelCase )
| 287
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = """gpt_neo"""
_UpperCAmelCase : Union[str, Any] = ["""past_key_values"""]
_UpperCAmelCase : List[Any] = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self , __magic_name__=5_0_2_5_7 , __magic_name__=2_0_4_8 , __magic_name__=2_0_4_8 , __magic_name__=2_4 , __magic_name__=[[["global", "local"], 1_2]] , __magic_name__=1_6 , __magic_name__=None , __magic_name__=2_5_6 , __magic_name__="gelu_new" , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.1 , __magic_name__=1e-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=5_0_2_5_6 , __magic_name__=5_0_2_5_6 , **__magic_name__ , ):
lowerCamelCase : List[Any] = vocab_size
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : str = hidden_size
lowerCamelCase : Optional[int] = num_layers
lowerCamelCase : str = num_heads
lowerCamelCase : Optional[Any] = intermediate_size
lowerCamelCase : List[Any] = window_size
lowerCamelCase : int = activation_function
lowerCamelCase : Union[str, Any] = resid_dropout
lowerCamelCase : List[Any] = embed_dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Dict = classifier_dropout
lowerCamelCase : Any = layer_norm_epsilon
lowerCamelCase : Dict = initializer_range
lowerCamelCase : Dict = use_cache
lowerCamelCase : Optional[Any] = bos_token_id
lowerCamelCase : int = eos_token_id
lowerCamelCase : List[Any] = attention_types
lowerCamelCase : Optional[Any] = self.expand_attention_types_params(__magic_name__ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
@staticmethod
def UpperCamelCase__ ( __magic_name__ ):
lowerCamelCase : Optional[int] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
import torch
lowerCamelCase : Any = input.size()
lowerCamelCase : List[Any] = len(lowerCamelCase )
lowerCamelCase : Optional[Any] = shape[dimension]
lowerCamelCase : Optional[int] = torch.arange(0, lowerCamelCase, lowerCamelCase )
lowerCamelCase : Dict = torch.div(sizedim - size, lowerCamelCase, rounding_mode="""floor""" ) + 1
lowerCamelCase : int = torch.arange(lowerCamelCase ) + low_indices[:min_length][:, None]
lowerCamelCase : str = [slice(lowerCamelCase )] * rank
lowerCamelCase : List[str] = indices
lowerCamelCase : Dict = input[s]
lowerCamelCase : Any = list(range(0, rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase ):
import torch
lowerCamelCase : List[Any] = torch.arange(1, lowerCamelCase )
lowerCamelCase : Optional[int] = torch.remainder(lowerCamelCase, lowerCamelCase )
lowerCamelCase : List[Any] = remainders == 0
lowerCamelCase : List[Any] = candidates[divisor_indices]
lowerCamelCase : Optional[Any] = torch.max(lowerCamelCase )
return largest_divisor, torch.div(lowerCamelCase, lowerCamelCase, rounding_mode="""floor""" )
class A__ ( __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
lowerCamelCase : str = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" )
lowerCamelCase : int = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowerCamelCase : Tuple = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def UpperCamelCase__ ( self ):
return self._config.num_heads
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ):
lowerCamelCase : Optional[int] = super(__magic_name__ , self ).generate_dummy_inputs(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
# We need to order the input in the way they appears in the forward()
lowerCamelCase : int = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCamelCase , lowerCamelCase : Optional[Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCamelCase : Optional[int] = seqlen + 2
lowerCamelCase : List[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCamelCase : str = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers )
]
lowerCamelCase : Tuple = common_inputs["""attention_mask"""]
if self.use_past:
lowerCamelCase : str = ordered_inputs["""attention_mask"""].dtype
lowerCamelCase : Any = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
return ordered_inputs
@property
def UpperCamelCase__ ( self ):
return 1_3
| 287
| 1
|
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
a = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
a = AutoTokenizer.from_pretrained('''xlm-roberta-base''' )
a = '''The dog is cute and lives in the garden house'''
a = jnp.array([tokenizer.encode(__lowerCamelCase )] )
a = (1, 12, 7_68) # batch_size, sequence_length, embedding_vector_dim
a = jnp.array(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
a = model(__lowerCamelCase )['''last_hidden_state''']
self.assertEqual(output.shape ,__lowerCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] ,__lowerCamelCase ,atol=1e-3 ) )
| 330
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase__ : Dict = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowerCamelCase_ ( a_ ):
SCREAMING_SNAKE_CASE_ = 'vit_mae'
def __init__( self : Dict ,__lowerCamelCase : Any=7_68 ,__lowerCamelCase : Optional[Any]=12 ,__lowerCamelCase : List[str]=12 ,__lowerCamelCase : Optional[int]=30_72 ,__lowerCamelCase : int="gelu" ,__lowerCamelCase : Union[str, Any]=0.0 ,__lowerCamelCase : Optional[int]=0.0 ,__lowerCamelCase : Dict=0.02 ,__lowerCamelCase : List[Any]=1e-12 ,__lowerCamelCase : Dict=2_24 ,__lowerCamelCase : str=16 ,__lowerCamelCase : Union[str, Any]=3 ,__lowerCamelCase : Optional[Any]=True ,__lowerCamelCase : Dict=16 ,__lowerCamelCase : List[str]=5_12 ,__lowerCamelCase : int=8 ,__lowerCamelCase : int=20_48 ,__lowerCamelCase : Optional[Any]=0.75 ,__lowerCamelCase : int=False ,**__lowerCamelCase : Any ,):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = initializer_range
a = layer_norm_eps
a = image_size
a = patch_size
a = num_channels
a = qkv_bias
a = decoder_num_attention_heads
a = decoder_hidden_size
a = decoder_num_hidden_layers
a = decoder_intermediate_size
a = mask_ratio
a = norm_pix_loss
| 330
| 1
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class __lowerCamelCase (_a ):
def snake_case_ ( self: Optional[Any],A_: str ):
'''simple docstring'''
with open(A_,encoding='utf-8' ) as input_file:
__UpperCamelCase = re.compile(r'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)' )
__UpperCamelCase = input_file.read()
__UpperCamelCase = regexp.search(A_ )
return match
def snake_case_ ( self: Optional[Any],A_: str ):
'''simple docstring'''
with open(A_,encoding='utf-8' ) as input_file:
__UpperCamelCase = re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()',re.DOTALL )
__UpperCamelCase = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__UpperCamelCase = regexp.finditer(A_ )
__UpperCamelCase = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = Path('./datasets' )
__UpperCamelCase = list(dataset_paths.absolute().glob('**/*.py' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(A_ ) ):
raise AssertionError(F'''open(...) must use utf-8 encoding in {dataset}''' )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = Path('./datasets' )
__UpperCamelCase = list(dataset_paths.absolute().glob('**/*.py' ) )
for dataset in dataset_files:
if self._no_print_statements(str(A_ ) ):
raise AssertionError(F'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
| 310
|
import torch
from transformers import AutoModel
class __lowerCamelCase (torch.nn.Module ):
def __init__( self: Union[str, Any],A_: Tuple="sayef/fsner-bert-base-uncased" ):
'''simple docstring'''
super(A_,self ).__init__()
__UpperCamelCase = AutoModel.from_pretrained(A_,return_dict=A_ )
__UpperCamelCase = torch.nn.CosineSimilarity(3,1E-08 )
__UpperCamelCase = torch.nn.Softmax(dim=1 )
def snake_case_ ( self: Tuple,**A_: Union[str, Any] ):
'''simple docstring'''
return self.bert(**A_ ).last_hidden_state
def snake_case_ ( self: Union[str, Any],A_: Union[str, Any] ):
'''simple docstring'''
return token_embeddings.sum(2,keepdim=A_ )
def snake_case_ ( self: List[str],A_: Dict,A_: Union[str, Any],A_: Union[str, Any]=1 ):
'''simple docstring'''
return self.softmax(T * self.cos(A_,A_ ) )
def snake_case_ ( self: Optional[int],A_: Union[str, Any],A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = W_supports['sizes'].tolist()
__UpperCamelCase = W_supports['start_token_id'].item()
__UpperCamelCase = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
__UpperCamelCase = self.BERT(**A_ )
__UpperCamelCase = self.BERT(**A_ )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = W_supports['input_ids'] == start_token_id
__UpperCamelCase = W_supports['input_ids'] == end_token_id
for i, size in enumerate(A_ ):
if i == 0:
__UpperCamelCase = 0
else:
__UpperCamelCase = support_sizes[i - 1]
__UpperCamelCase = S[s : s + size][start_token_masks[s : s + size]]
__UpperCamelCase = S[s : s + size][end_token_masks[s : s + size]]
__UpperCamelCase = torch.matmul(q[i],s_start.T ).sum(1 ).softmax(0 )
__UpperCamelCase = torch.matmul(q[i],s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
__UpperCamelCase = torch.vstack((p_starts, p_start) )
__UpperCamelCase = torch.vstack((p_ends, p_end) )
else:
__UpperCamelCase = p_start
__UpperCamelCase = p_end
return p_starts, p_ends
| 310
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
_a = '2020.9.26'
_a = 'xcodz-dot, cclaus, dhruvmanila'
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
if not all(isinstance(_lowerCAmelCase, (float, int) ) for val in locals().values() ):
UpperCAmelCase_ : Tuple = f"""Input values must either be float or int: {list(locals().values() )}"""
raise TypeError(_lowerCAmelCase )
UpperCAmelCase_ : List[str] = ((x * distance) / (z + distance)) * scale
UpperCAmelCase_ : List[Any] = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
if not isinstance(_lowerCAmelCase, _lowerCAmelCase ):
raise TypeError("Axis must be a str" )
UpperCAmelCase_ : List[str] = locals()
del input_variables["axis"]
if not all(isinstance(_lowerCAmelCase, (float, int) ) for val in input_variables.values() ):
UpperCAmelCase_ : Union[str, Any] = (
"""Input values except axis must either be float or int: """
f"""{list(input_variables.values() )}"""
)
raise TypeError(_lowerCAmelCase )
UpperCAmelCase_ : Optional[int] = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
UpperCAmelCase_ : Dict = x * math.cos(_lowerCAmelCase ) - y * math.sin(_lowerCAmelCase )
UpperCAmelCase_ : Any = y * math.cos(_lowerCAmelCase ) + x * math.sin(_lowerCAmelCase )
UpperCAmelCase_ : Union[str, Any] = z
elif axis == "x":
UpperCAmelCase_ : Tuple = y * math.cos(_lowerCAmelCase ) - z * math.sin(_lowerCAmelCase )
UpperCAmelCase_ : Any = z * math.cos(_lowerCAmelCase ) + y * math.sin(_lowerCAmelCase )
UpperCAmelCase_ : Optional[int] = x
elif axis == "y":
UpperCAmelCase_ : Tuple = x * math.cos(_lowerCAmelCase ) - z * math.sin(_lowerCAmelCase )
UpperCAmelCase_ : Optional[Any] = z * math.cos(_lowerCAmelCase ) + x * math.sin(_lowerCAmelCase )
UpperCAmelCase_ : List[str] = y
else:
raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'" )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(f"""{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }""")
| 370
|
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
_a = (
'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def __a ( __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
return (preds == labels).mean()
def __a ( __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
UpperCAmelCase_ : Optional[Any] = simple_accuracy(__lowerCamelCase, __lowerCamelCase )
UpperCAmelCase_ : List[Any] = fa_score(y_true=__lowerCamelCase, y_pred=__lowerCamelCase )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def __a ( __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
UpperCAmelCase_ : Any = pearsonr(__lowerCamelCase, __lowerCamelCase )[0]
UpperCAmelCase_ : Optional[Any] = spearmanr(__lowerCamelCase, __lowerCamelCase )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
assert len(__lowerCamelCase ) == len(__lowerCamelCase ), f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}"""
if task_name == "cola":
return {"mcc": matthews_corrcoef(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "mrpc":
return acc_and_fa(__lowerCamelCase, __lowerCamelCase )
elif task_name == "sts-b":
return pearson_and_spearman(__lowerCamelCase, __lowerCamelCase )
elif task_name == "qqp":
return acc_and_fa(__lowerCamelCase, __lowerCamelCase )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "qnli":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "rte":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "wnli":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "hans":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
else:
raise KeyError(__lowerCamelCase )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError(f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}""" )
if task_name == "xnli":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
else:
raise KeyError(__lowerCamelCase )
| 23
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_a : Optional[Any] = TypeVar('T')
class __A ( Generic[T] ):
def __init__( self , a__ ):
_lowerCAmelCase : Optional[Any] = data
_lowerCAmelCase : Node[T] | None = None
def __str__( self ):
return F"{self.data}"
class __A ( Generic[T] ):
def __init__( self ):
_lowerCAmelCase : Node[T] | None = None
def __iter__( self ):
_lowerCAmelCase : List[Any] = self.top
while node:
yield node.data
_lowerCAmelCase : Dict = node.next
def __str__( self ):
return "->".join([str(a__ ) for item in self] )
def __len__( self ):
return len(tuple(iter(self ) ) )
def __A ( self ):
return self.top is None
def __A ( self , a__ ):
_lowerCAmelCase : str = Node(a__ )
if not self.is_empty():
_lowerCAmelCase : int = self.top
_lowerCAmelCase : List[str] = node
def __A ( self ):
if self.is_empty():
raise IndexError("""pop from empty stack""" )
assert isinstance(self.top , a__ )
_lowerCAmelCase : Optional[Any] = self.top
_lowerCAmelCase : Optional[Any] = self.top.next
return pop_node.data
def __A ( self ):
if self.is_empty():
raise IndexError("""peek from empty stack""" )
assert self.top is not None
return self.top.data
def __A ( self ):
_lowerCAmelCase : List[str] = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 44
|
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def A ( _lowercase ):
if not is_accelerate_available():
return method
SCREAMING_SNAKE_CASE : int = version.parse(accelerate.__version__ ).base_version
if version.parse(_lowercase ) < version.parse('''0.17.0''' ):
return method
def wrapper(self , *_lowercase , **_lowercase ):
if hasattr(self , '''_hf_hook''' ) and hasattr(self._hf_hook , '''pre_forward''' ):
self._hf_hook.pre_forward(self )
return method(self , *_lowercase , **_lowercase )
return wrapper
| 182
| 0
|
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
UpperCamelCase__ ={
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
if got_ver is None or want_ver is None:
raise ValueError(
f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
f""" reinstalling {pkg}.""" )
if not ops[op](version.parse(__lowerCamelCase ), version.parse(__lowerCamelCase ) ):
raise ImportError(
f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase = None ):
_SCREAMING_SNAKE_CASE : List[Any] = f"""\n{hint}""" if hint is not None else ""
# non-versioned check
if re.match(R"^[\w_\-\d]+$", __lowerCamelCase ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = requirement, None, None
else:
_SCREAMING_SNAKE_CASE : int = re.findall(R"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", __lowerCamelCase )
if not match:
raise ValueError(
"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
f""" got {requirement}""" )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Tuple = match[0]
_SCREAMING_SNAKE_CASE : Union[str, Any] = want_full.split("," ) # there could be multiple requirements
_SCREAMING_SNAKE_CASE : Tuple = {}
for w in want_range:
_SCREAMING_SNAKE_CASE : List[Any] = re.findall(R"^([\s!=<>]{1,2})(.+)", __lowerCamelCase )
if not match:
raise ValueError(
"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
f""" but got {requirement}""" )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = match[0]
_SCREAMING_SNAKE_CASE : Dict = want_ver
if op not in ops:
raise ValueError(f"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
# special case
if pkg == "python":
_SCREAMING_SNAKE_CASE : List[Any] = ".".join([str(__lowerCamelCase ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
return
# check if any version is installed
try:
_SCREAMING_SNAKE_CASE : List[str] = importlib.metadata.version(__lowerCamelCase )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f"""The '{requirement}' distribution was not found and is required by this application. {hint}""" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : int = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
return require_version(__lowerCamelCase, __lowerCamelCase )
| 325
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class lowerCAmelCase__:
'''simple docstring'''
__snake_case = BlenderbotSmallConfig
__snake_case = {}
__snake_case = 'gelu'
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=2_0 , __lowerCamelCase=2 , __lowerCamelCase=1 , __lowerCamelCase=0 , ) -> List[str]:
_SCREAMING_SNAKE_CASE : int = parent
_SCREAMING_SNAKE_CASE : Tuple = batch_size
_SCREAMING_SNAKE_CASE : Dict = seq_length
_SCREAMING_SNAKE_CASE : List[str] = is_training
_SCREAMING_SNAKE_CASE : List[str] = use_labels
_SCREAMING_SNAKE_CASE : Dict = vocab_size
_SCREAMING_SNAKE_CASE : Dict = hidden_size
_SCREAMING_SNAKE_CASE : int = num_hidden_layers
_SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
_SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
_SCREAMING_SNAKE_CASE : Optional[int] = eos_token_id
_SCREAMING_SNAKE_CASE : Optional[Any] = pad_token_id
_SCREAMING_SNAKE_CASE : List[str] = bos_token_id
def UpperCamelCase_ ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_SCREAMING_SNAKE_CASE : List[str] = tf.concat([input_ids, eos_tensor] , axis=1 )
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : str = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_SCREAMING_SNAKE_CASE : List[Any] = prepare_blenderbot_small_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return config, inputs_dict
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Tuple:
_SCREAMING_SNAKE_CASE : Any = TFBlenderbotSmallModel(config=__lowerCamelCase ).get_decoder()
_SCREAMING_SNAKE_CASE : Dict = inputs_dict["input_ids"]
_SCREAMING_SNAKE_CASE : List[Any] = input_ids[:1, :]
_SCREAMING_SNAKE_CASE : Optional[Any] = inputs_dict["attention_mask"][:1, :]
_SCREAMING_SNAKE_CASE : List[str] = inputs_dict["head_mask"]
_SCREAMING_SNAKE_CASE : int = 1
# first forward pass
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , head_mask=__lowerCamelCase , use_cache=__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_SCREAMING_SNAKE_CASE : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_SCREAMING_SNAKE_CASE : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
_SCREAMING_SNAKE_CASE : int = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_SCREAMING_SNAKE_CASE : List[str] = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_SCREAMING_SNAKE_CASE : str = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_SCREAMING_SNAKE_CASE : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx]
_SCREAMING_SNAKE_CASE : Dict = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__lowerCamelCase , __lowerCamelCase , rtol=1E-3 )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, ):
if attention_mask is None:
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.cast(tf.math.not_equal(__lowerCamelCase, config.pad_token_id ), tf.inta )
if decoder_attention_mask is None:
_SCREAMING_SNAKE_CASE : List[str] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ),
], axis=-1, )
if head_mask is None:
_SCREAMING_SNAKE_CASE : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_SCREAMING_SNAKE_CASE : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_SCREAMING_SNAKE_CASE : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__snake_case = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__snake_case = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__snake_case = True
__snake_case = False
__snake_case = False
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = TFBlenderbotSmallModelTester(self )
_SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Tuple:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__lowerCamelCase )
@require_tokenizers
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
__snake_case = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
__snake_case = 'facebook/blenderbot_small-90M'
@cached_property
def UpperCamelCase_ ( self ) -> List[Any]:
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(self.src_text , return_tensors="tf" )
_SCREAMING_SNAKE_CASE : Dict = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__lowerCamelCase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 325
| 1
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
lowerCAmelCase : Union[str, Any] = (3, 9, -11, 0, 7, 5, 1, -1)
lowerCAmelCase : str = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class __lowercase :
"""simple docstring"""
_UpperCAmelCase : int
_UpperCAmelCase : Node | None
class __lowercase :
"""simple docstring"""
def __init__( self : Optional[int] , lowerCAmelCase__ : Iterable[int]):
SCREAMING_SNAKE_CASE_: str = None
for i in sorted(UpperCAmelCase__ , reverse=UpperCAmelCase__):
SCREAMING_SNAKE_CASE_: Dict = Node(UpperCAmelCase__ , self.head)
def __iter__( self : str):
SCREAMING_SNAKE_CASE_: List[str] = self.head
while node:
yield node.data
SCREAMING_SNAKE_CASE_: Any = node.next_node
def __len__( self : Optional[int]):
return sum(1 for _ in self)
def __str__( self : List[str]):
return " -> ".join([str(UpperCAmelCase__) for node in self])
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
return SortedLinkedList(list(UpperCamelCase_ ) + list(UpperCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : Optional[Any] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 13
|
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str]) -> Optional[int]:
'''simple docstring'''
if isinstance(UpperCamelCase_, torch.Tensor):
return image
elif isinstance(UpperCamelCase_, PIL.Image.Image):
__lowercase = [image]
if isinstance(image[0], PIL.Image.Image):
__lowercase = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
__lowercase = np.concatenate(UpperCamelCase_, axis=0)
__lowercase = np.array(UpperCamelCase_).astype(np.floataa) / 255.0
__lowercase = image.transpose(0, 3, 1, 2)
__lowercase = 2.0 * image - 1.0
__lowercase = torch.from_numpy(UpperCamelCase_)
elif isinstance(image[0], torch.Tensor):
__lowercase = torch.cat(UpperCamelCase_, dim=0)
return image
def _A ( UpperCamelCase_ : Dict, UpperCamelCase_ : str, UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[Any]=0.9_995) -> int:
'''simple docstring'''
if not isinstance(UpperCamelCase_, np.ndarray):
__lowercase = True
__lowercase = va.device
__lowercase = va.cpu().numpy()
__lowercase = va.cpu().numpy()
__lowercase = np.sum(va * va / (np.linalg.norm(UpperCamelCase_) * np.linalg.norm(UpperCamelCase_)))
if np.abs(UpperCamelCase_) > DOT_THRESHOLD:
__lowercase = (1 - t) * va + t * va
else:
__lowercase = np.arccos(UpperCamelCase_)
__lowercase = np.sin(UpperCamelCase_)
__lowercase = theta_a * t
__lowercase = np.sin(UpperCamelCase_)
__lowercase = np.sin(theta_a - theta_t) / sin_theta_a
__lowercase = sin_theta_t / sin_theta_a
__lowercase = sa * va + sa * va
if inputs_are_torch:
__lowercase = torch.from_numpy(UpperCamelCase_).to(UpperCamelCase_)
return va
def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Union[str, Any]) -> int:
'''simple docstring'''
__lowercase = F.normalize(UpperCamelCase_, dim=-1)
__lowercase = F.normalize(UpperCamelCase_, dim=-1)
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def _A ( UpperCamelCase_ : Optional[int], UpperCamelCase_ : str) -> Optional[int]:
'''simple docstring'''
for param in model.parameters():
__lowercase = value
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
def __init__( self : Dict, UpperCAmelCase__ : AutoencoderKL, UpperCAmelCase__ : CLIPTextModel, UpperCAmelCase__ : CLIPModel, UpperCAmelCase__ : CLIPTokenizer, UpperCAmelCase__ : UNetaDConditionModel, UpperCAmelCase__ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], UpperCAmelCase__ : CLIPFeatureExtractor, UpperCAmelCase__ : Union[str, Any]=None, UpperCAmelCase__ : List[str]=None, UpperCAmelCase__ : Any=None, ):
super().__init__()
self.register_modules(
vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, clip_model=UpperCAmelCase__, tokenizer=UpperCAmelCase__, unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, feature_extractor=UpperCAmelCase__, coca_model=UpperCAmelCase__, coca_tokenizer=UpperCAmelCase__, coca_transform=UpperCAmelCase__, )
__lowercase = (
feature_extractor.size
if isinstance(feature_extractor.size, UpperCAmelCase__ )
else feature_extractor.size["shortest_edge"]
)
__lowercase = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std )
set_requires_grad(self.text_encoder, UpperCAmelCase__ )
set_requires_grad(self.clip_model, UpperCAmelCase__ )
def _lowercase ( self : Tuple, UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase__ )
def _lowercase ( self : int ):
self.enable_attention_slicing(UpperCAmelCase__ )
def _lowercase ( self : str ):
set_requires_grad(self.vae, UpperCAmelCase__ )
def _lowercase ( self : Any ):
set_requires_grad(self.vae, UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] ):
set_requires_grad(self.unet, UpperCAmelCase__ )
def _lowercase ( self : Any ):
set_requires_grad(self.unet, UpperCAmelCase__ )
def _lowercase ( self : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[Any] ):
# get the original timestep using init_timestep
__lowercase = min(int(num_inference_steps * strength ), UpperCAmelCase__ )
__lowercase = max(num_inference_steps - init_timestep, 0 )
__lowercase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _lowercase ( self : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : int=None ):
if not isinstance(UpperCAmelCase__, torch.Tensor ):
raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(UpperCAmelCase__ )}""" )
__lowercase = image.to(device=UpperCAmelCase__, dtype=UpperCAmelCase__ )
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
__lowercase = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCAmelCase__ )
]
__lowercase = torch.cat(UpperCAmelCase__, dim=0 )
else:
__lowercase = self.vae.encode(UpperCAmelCase__ ).latent_dist.sample(UpperCAmelCase__ )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__lowercase = 0.18_215 * init_latents
__lowercase = init_latents.repeat_interleave(UpperCAmelCase__, dim=0 )
__lowercase = randn_tensor(init_latents.shape, generator=UpperCAmelCase__, device=UpperCAmelCase__, dtype=UpperCAmelCase__ )
# get latents
__lowercase = self.scheduler.add_noise(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = init_latents
return latents
def _lowercase ( self : Optional[int], UpperCAmelCase__ : Dict ):
__lowercase = self.coca_transform(UpperCAmelCase__ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__lowercase = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype ) )
__lowercase = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>", "" ).rstrip(" .," )
def _lowercase ( self : Tuple, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Tuple ):
__lowercase = self.feature_extractor.preprocess(UpperCAmelCase__ )
__lowercase = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
__lowercase = self.clip_model.get_image_features(UpperCAmelCase__ )
__lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=UpperCAmelCase__ )
__lowercase = image_embeddings_clip.repeat_interleave(UpperCAmelCase__, dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def _lowercase ( self : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[int], ):
__lowercase = latents.detach().requires_grad_()
__lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ )
# predict the noise residual
__lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample
if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__lowercase = self.scheduler.alphas_cumprod[timestep]
__lowercase = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowercase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__lowercase = torch.sqrt(UpperCAmelCase__ )
__lowercase = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler, UpperCAmelCase__ ):
__lowercase = self.scheduler.sigmas[index]
__lowercase = latents - sigma * noise_pred
else:
raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__lowercase = 1 / 0.18_215 * sample
__lowercase = self.vae.decode(UpperCAmelCase__ ).sample
__lowercase = (image / 2 + 0.5).clamp(0, 1 )
__lowercase = transforms.Resize(self.feature_extractor_size )(UpperCAmelCase__ )
__lowercase = self.normalize(UpperCAmelCase__ ).to(latents.dtype )
__lowercase = self.clip_model.get_image_features(UpperCAmelCase__ )
__lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=UpperCAmelCase__ )
__lowercase = spherical_dist_loss(UpperCAmelCase__, UpperCAmelCase__ ).mean() * clip_guidance_scale
__lowercase = -torch.autograd.grad(UpperCAmelCase__, UpperCAmelCase__ )[0]
if isinstance(self.scheduler, UpperCAmelCase__ ):
__lowercase = latents.detach() + grads * (sigma**2)
__lowercase = noise_pred_original
else:
__lowercase = noise_pred_original - torch.sqrt(UpperCAmelCase__ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : str, UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : float = 0.6, UpperCAmelCase__ : Optional[int] = 5_0, UpperCAmelCase__ : Optional[float] = 7.5, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[float] = 1_0_0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : float = 0.8, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, ):
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size:
raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(UpperCAmelCase__ )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(UpperCAmelCase__, torch.Generator ) and batch_size > 1:
__lowercase = [generator] + [None] * (batch_size - 1)
__lowercase = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
__lowercase = [x[0] for x in coca_is_none if x[1]]
__lowercase = ", ".join(UpperCAmelCase__ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(UpperCAmelCase__ ):
raise ValueError(
F"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
__lowercase = self.get_image_description(UpperCAmelCase__ )
if style_prompt is None:
if len(UpperCAmelCase__ ):
raise ValueError(
F"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
__lowercase = self.get_image_description(UpperCAmelCase__ )
# get prompt text embeddings for content and style
__lowercase = self.tokenizer(
UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", )
__lowercase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__lowercase = self.tokenizer(
UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", )
__lowercase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# duplicate text embeddings for each generation per prompt
__lowercase = text_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 )
# set timesteps
__lowercase = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__lowercase = {}
if accepts_offset:
__lowercase = 1
self.scheduler.set_timesteps(UpperCAmelCase__, **UpperCAmelCase__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
__lowercase ,__lowercase = self.get_timesteps(UpperCAmelCase__, UpperCAmelCase__, self.device )
__lowercase = timesteps[:1].repeat(UpperCAmelCase__ )
# Preprocess image
__lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.prepare_latents(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ )
__lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.prepare_latents(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ )
__lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
if clip_guidance_scale > 0:
__lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = slerp(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__lowercase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__lowercase = content_text_input.input_ids.shape[-1]
__lowercase = self.tokenizer([""], padding="max_length", max_length=UpperCAmelCase__, return_tensors="pt" )
__lowercase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__lowercase = uncond_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowercase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__lowercase = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__lowercase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device="cpu", dtype=UpperCAmelCase__ ).to(
self.device )
else:
__lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device=self.device, dtype=UpperCAmelCase__ )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__lowercase = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__lowercase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__lowercase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__lowercase = {}
if accepts_eta:
__lowercase = eta
# check if the scheduler accepts generator
__lowercase = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__lowercase = generator
with self.progress_bar(total=UpperCAmelCase__ ):
for i, t in enumerate(UpperCAmelCase__ ):
# expand the latents if we are doing classifier free guidance
__lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ )
# predict the noise residual
__lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__lowercase ,__lowercase = noise_pred.chunk(2 )
__lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__lowercase = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__lowercase ,__lowercase = self.cond_fn(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
# compute the previous noisy sample x_t -> x_t-1
__lowercase = self.scheduler.step(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, **UpperCAmelCase__ ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__lowercase = 1 / 0.18_215 * latents
__lowercase = self.vae.decode(UpperCAmelCase__ ).sample
__lowercase = (image / 2 + 0.5).clamp(0, 1 )
__lowercase = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=UpperCAmelCase__, nsfw_content_detected=UpperCAmelCase__ )
| 17
| 0
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any]=1_0_2_4 , lowerCAmelCase_ : Optional[int]=1_0_2_4 , lowerCAmelCase_ : Optional[int]=3.6 ) -> Optional[int]:
__lowerCAmelCase = tokenizer
__lowerCAmelCase = tokenizer.bos_token_id
__lowerCAmelCase = dataset
__lowerCAmelCase = seq_length
__lowerCAmelCase = seq_length * chars_per_token * num_of_sequences
def __iter__( self : Dict ) -> Dict:
__lowerCAmelCase = iter(self.dataset )
__lowerCAmelCase = True
while more_examples:
__lowerCAmelCase , __lowerCAmelCase = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(lowerCAmelCase_ )['content'] )
buffer_len += len(buffer[-1] )
except StopIteration:
__lowerCAmelCase = False
break
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , truncation=lowerCAmelCase_ )['input_ids']
__lowerCAmelCase = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(lowerCAmelCase_ ) , self.seq_length ):
__lowerCAmelCase = all_token_ids[i : i + self.seq_length]
if len(lowerCAmelCase_ ) == self.seq_length:
yield torch.tensor(lowerCAmelCase_ )
def a_ ( lowerCAmelCase_ : Optional[int] ):
__lowerCAmelCase = {'streaming': True}
__lowerCAmelCase = load_dataset(args.dataset_name, split='train', **lowerCAmelCase_ )
__lowerCAmelCase = ConstantLengthDataset(lowerCAmelCase_, lowerCAmelCase_, seq_length=args.seq_length )
__lowerCAmelCase = DataLoader(lowerCAmelCase_, batch_size=args.batch_size )
return eval_dataloader
def a_ ( lowerCAmelCase_ : List[Any] ):
model.eval()
__lowerCAmelCase = []
for step, batch in enumerate(lowerCAmelCase_ ):
with torch.no_grad():
__lowerCAmelCase = model(lowerCAmelCase_, labels=lowerCAmelCase_ )
__lowerCAmelCase = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(lowerCAmelCase_ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
__lowerCAmelCase = torch.mean(torch.cat(lowerCAmelCase_ ) )
try:
__lowerCAmelCase = torch.exp(lowerCAmelCase_ )
except OverflowError:
__lowerCAmelCase = float('inf' )
return loss.item(), perplexity.item()
# Setup Accelerator
_snake_case : List[str] = Accelerator()
# Parse configuration
_snake_case : int = HfArgumentParser(EvaluationArguments)
_snake_case : int = parser.parse_args()
set_seed(args.seed)
# Logging
_snake_case : Tuple = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
_snake_case : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
_snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
_snake_case : Union[str, Any] = create_dataloader(args)
# Prepare everything with our `accelerator`.
_snake_case , _snake_case : Dict = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
_snake_case , _snake_case : Dict = evaluate(args)
logger.info(F"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 207
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_snake_case : Dict = 0
_snake_case : Dict = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_snake_case : List[str] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_snake_case : Tuple = tuple[int, int]
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Node | None , ) -> None:
__lowerCAmelCase = pos_x
__lowerCAmelCase = pos_y
__lowerCAmelCase = (pos_y, pos_x)
__lowerCAmelCase = goal_x
__lowerCAmelCase = goal_y
__lowerCAmelCase = g_cost
__lowerCAmelCase = parent
__lowerCAmelCase = self.calculate_heuristic()
__lowerCAmelCase = self.g_cost + self.h_cost
def lowercase ( self : Any ) -> float:
__lowerCAmelCase = self.pos_x - self.goal_x
__lowerCAmelCase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowerCAmelCase_ ) + abs(lowerCAmelCase_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : Union[str, Any] , lowerCAmelCase_ : Node ) -> bool:
return self.f_cost < other.f_cost
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase_ : TPosition , lowerCAmelCase_ : TPosition ) -> Tuple:
__lowerCAmelCase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCAmelCase_ )
__lowerCAmelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , lowerCAmelCase_ )
__lowerCAmelCase = [self.start]
__lowerCAmelCase = []
__lowerCAmelCase = False
def lowercase ( self : str ) -> list[TPosition]:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowerCAmelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowerCAmelCase_ )
self.closed_nodes.append(lowerCAmelCase_ )
__lowerCAmelCase = self.get_successors(lowerCAmelCase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowerCAmelCase_ )
else:
# retrieve the best current path
__lowerCAmelCase = self.open_nodes.pop(self.open_nodes.index(lowerCAmelCase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowerCAmelCase_ )
else:
self.open_nodes.append(lowerCAmelCase_ )
return [self.start.pos]
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : Node ) -> list[Node]:
__lowerCAmelCase = []
for action in delta:
__lowerCAmelCase = parent.pos_x + action[1]
__lowerCAmelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCAmelCase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowerCAmelCase_ , lowerCAmelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCAmelCase_ , ) )
return successors
def lowercase ( self : Tuple , lowerCAmelCase_ : Node | None ) -> list[TPosition]:
__lowerCAmelCase = node
__lowerCAmelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowerCAmelCase = current_node.parent
path.reverse()
return path
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : int , lowerCAmelCase_ : TPosition , lowerCAmelCase_ : TPosition ) -> None:
__lowerCAmelCase = AStar(lowerCAmelCase_ , lowerCAmelCase_ )
__lowerCAmelCase = AStar(lowerCAmelCase_ , lowerCAmelCase_ )
__lowerCAmelCase = False
def lowercase ( self : Dict ) -> list[TPosition]:
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__lowerCAmelCase = self.fwd_astar.open_nodes.pop(0 )
__lowerCAmelCase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowerCAmelCase_ , lowerCAmelCase_ )
self.fwd_astar.closed_nodes.append(lowerCAmelCase_ )
self.bwd_astar.closed_nodes.append(lowerCAmelCase_ )
__lowerCAmelCase = current_bwd_node
__lowerCAmelCase = current_fwd_node
__lowerCAmelCase = {
self.fwd_astar: self.fwd_astar.get_successors(lowerCAmelCase_ ),
self.bwd_astar: self.bwd_astar.get_successors(lowerCAmelCase_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowerCAmelCase_ )
else:
# retrieve the best current path
__lowerCAmelCase = astar.open_nodes.pop(
astar.open_nodes.index(lowerCAmelCase_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowerCAmelCase_ )
else:
astar.open_nodes.append(lowerCAmelCase_ )
return [self.fwd_astar.start.pos]
def lowercase ( self : Dict , lowerCAmelCase_ : Node , lowerCAmelCase_ : Node ) -> list[TPosition]:
__lowerCAmelCase = self.fwd_astar.retrace_path(lowerCAmelCase_ )
__lowerCAmelCase = self.bwd_astar.retrace_path(lowerCAmelCase_ )
bwd_path.pop()
bwd_path.reverse()
__lowerCAmelCase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_snake_case : List[Any] = (0, 0)
_snake_case : Union[str, Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_snake_case : int = time.time()
_snake_case : Optional[int] = AStar(init, goal)
_snake_case : int = a_star.search()
_snake_case : Union[str, Any] = time.time() - start_time
print(F"""AStar execution time = {end_time:f} seconds""")
_snake_case : Any = time.time()
_snake_case : Dict = BidirectionalAStar(init, goal)
_snake_case : Optional[int] = time.time() - bd_start_time
print(F"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 207
| 1
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def a_ ( ):
__lowerCAmelCase = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
__lowerCAmelCase = Image.open(requests.get(lowerCAmelCase_, stream=lowerCAmelCase_ ).raw ).convert('RGB' )
return image
def a_ ( lowerCAmelCase_ : Optional[int] ):
__lowerCAmelCase = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def a_ ( lowerCAmelCase_ : Optional[int], lowerCAmelCase_ : str, lowerCAmelCase_ : Union[str, Any] ):
__lowerCAmelCase = dct.pop(lowerCAmelCase_ )
__lowerCAmelCase = val
def a_ ( lowerCAmelCase_ : Optional[Any], lowerCAmelCase_ : Optional[Any] ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__lowerCAmelCase = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" )
__lowerCAmelCase = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
__lowerCAmelCase = torch.cat((q_bias, torch.zeros_like(lowerCAmelCase_, requires_grad=lowerCAmelCase_ ), v_bias) )
__lowerCAmelCase = qkv_bias
def a_ ( lowerCAmelCase_ : Dict, lowerCAmelCase_ : Any ):
__lowerCAmelCase = 364 if 'coco' in model_name else 224
__lowerCAmelCase = BlipaVisionConfig(image_size=lowerCAmelCase_ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__lowerCAmelCase = OPTConfig.from_pretrained('facebook/opt-2.7b', eos_token_id=lowerCAmelCase_ ).to_dict()
elif "opt-6.7b" in model_name:
__lowerCAmelCase = OPTConfig.from_pretrained('facebook/opt-6.7b', eos_token_id=lowerCAmelCase_ ).to_dict()
elif "t5-xl" in model_name:
__lowerCAmelCase = TaConfig.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__lowerCAmelCase = TaConfig.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1 ).to_dict()
__lowerCAmelCase = BlipaConfig(vision_config=lowerCAmelCase_, text_config=lowerCAmelCase_ )
return config, image_size
@torch.no_grad()
def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : Optional[int]=None, lowerCAmelCase_ : int=False ):
__lowerCAmelCase = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
__lowerCAmelCase = tokenizer('\n', add_special_tokens=lowerCAmelCase_ ).input_ids[0]
__lowerCAmelCase , __lowerCAmelCase = get_blipa_config(lowerCAmelCase_, eos_token_id=lowerCAmelCase_ )
__lowerCAmelCase = BlipaForConditionalGeneration(lowerCAmelCase_ ).eval()
__lowerCAmelCase = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
__lowerCAmelCase , __lowerCAmelCase = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
__lowerCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = load_model_and_preprocess(
name=lowerCAmelCase_, model_type=lowerCAmelCase_, is_eval=lowerCAmelCase_, device=lowerCAmelCase_ )
original_model.eval()
print('Done!' )
# update state dict keys
__lowerCAmelCase = original_model.state_dict()
__lowerCAmelCase = create_rename_keys(lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__lowerCAmelCase = state_dict.pop(lowerCAmelCase_ )
if key.startswith('Qformer.bert' ):
__lowerCAmelCase = key.replace('Qformer.bert', 'qformer' )
if "attention.self" in key:
__lowerCAmelCase = key.replace('self', 'attention' )
if "opt_proj" in key:
__lowerCAmelCase = key.replace('opt_proj', 'language_projection' )
if "t5_proj" in key:
__lowerCAmelCase = key.replace('t5_proj', 'language_projection' )
if key.startswith('opt' ):
__lowerCAmelCase = key.replace('opt', 'language' )
if key.startswith('t5' ):
__lowerCAmelCase = key.replace('t5', 'language' )
__lowerCAmelCase = val
# read in qv biases
read_in_q_v_bias(lowerCAmelCase_, lowerCAmelCase_ )
__lowerCAmelCase , __lowerCAmelCase = hf_model.load_state_dict(lowerCAmelCase_, strict=lowerCAmelCase_ )
assert len(lowerCAmelCase_ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__lowerCAmelCase = load_demo_image()
__lowerCAmelCase = vis_processors['eval'](lowerCAmelCase_ ).unsqueeze(0 ).to(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer(['\n'], return_tensors='pt' ).input_ids.to(lowerCAmelCase_ )
# create processor
__lowerCAmelCase = BlipImageProcessor(
size={'height': image_size, 'width': image_size}, image_mean=lowerCAmelCase_, image_std=lowerCAmelCase_ )
__lowerCAmelCase = BlipaProcessor(image_processor=lowerCAmelCase_, tokenizer=lowerCAmelCase_ )
__lowerCAmelCase = processor(images=lowerCAmelCase_, return_tensors='pt' ).pixel_values.to(lowerCAmelCase_ )
# make sure processor creates exact same pixel values
assert torch.allclose(lowerCAmelCase_, lowerCAmelCase_ )
original_model.to(lowerCAmelCase_ )
hf_model.to(lowerCAmelCase_ )
with torch.no_grad():
if "opt" in model_name:
__lowerCAmelCase = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
__lowerCAmelCase = hf_model(lowerCAmelCase_, lowerCAmelCase_ ).logits
else:
__lowerCAmelCase = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
__lowerCAmelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100 )
__lowerCAmelCase = hf_model(lowerCAmelCase_, lowerCAmelCase_, labels=lowerCAmelCase_ ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:', original_logits[0, :3, :3] )
print('First values of HF logits:', logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__lowerCAmelCase = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=lowerCAmelCase_ )
assert torch.allclose(logits[0, :3, :3], lowerCAmelCase_, atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__lowerCAmelCase = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=lowerCAmelCase_ )
else:
# cast to same type
__lowerCAmelCase = logits.dtype
assert torch.allclose(original_logits.to(lowerCAmelCase_ ), lowerCAmelCase_, atol=1E-2 )
print('Looks ok!' )
print('Generating a caption...' )
__lowerCAmelCase = ''
__lowerCAmelCase = tokenizer(lowerCAmelCase_, return_tensors='pt' ).input_ids.to(lowerCAmelCase_ )
__lowerCAmelCase = original_model.generate({'image': original_pixel_values} )
__lowerCAmelCase = hf_model.generate(
lowerCAmelCase_, lowerCAmelCase_, do_sample=lowerCAmelCase_, num_beams=5, max_length=30, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, temperature=1, )
print('Original generation:', lowerCAmelCase_ )
__lowerCAmelCase = input_ids.shape[1]
__lowerCAmelCase = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = [text.strip() for text in output_text]
print('HF generation:', lowerCAmelCase_ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowerCAmelCase_ )
hf_model.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
processor.push_to_hub(F"""nielsr/{model_name}""" )
hf_model.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
_snake_case : Tuple = argparse.ArgumentParser()
_snake_case : Optional[Any] = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
_snake_case : Optional[Any] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 284
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 284
| 1
|
from math import factorial
lowerCamelCase__ = {str(digit): factorial(digit) for digit in range(10)}
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise TypeError('Parameter number must be int' )
if number < 0:
raise ValueError('Parameter number must be greater than or equal to 0' )
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(SCREAMING_SNAKE_CASE_ ) )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = 60 , SCREAMING_SNAKE_CASE_ = 1_000_000 ) -> int:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise TypeError('Parameters chain_length and number_limit must be int' )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
'Parameters chain_length and number_limit must be greater than 0' )
# the counter for the chains with the exact desired length
lowerCAmelCase__ : Tuple = 0
# the cached sizes of the previous chains
lowerCAmelCase__ : dict[int, int] = {}
for start_chain_element in range(1 , SCREAMING_SNAKE_CASE_ ):
# The temporary set will contain the elements of the chain
lowerCAmelCase__ : Dict = set()
lowerCAmelCase__ : str = 0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater then the desired one.
lowerCAmelCase__ : Dict = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(SCREAMING_SNAKE_CASE_ )
chain_set_length += 1
lowerCAmelCase__ : Union[str, Any] = digit_factorial_sum(SCREAMING_SNAKE_CASE_ )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
lowerCAmelCase__ : Tuple = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution()}""")
| 361
|
from __future__ import annotations
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> list[list[int]]:
lowerCAmelCase__ : list[list[int]] = []
create_all_state(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , [] , SCREAMING_SNAKE_CASE_ )
return result
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> None:
if level == 0:
total_list.append(current_list[:] )
return
for i in range(SCREAMING_SNAKE_CASE_ , total_number - level + 2 ):
current_list.append(SCREAMING_SNAKE_CASE_ )
create_all_state(i + 1 , SCREAMING_SNAKE_CASE_ , level - 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
current_list.pop()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> None:
for i in total_list:
print(*SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
lowerCamelCase__ = 4
lowerCamelCase__ = 2
lowerCamelCase__ = generate_all_combinations(n, k)
print_all_state(total_list)
| 307
| 0
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : List[str] = (DDIMParallelScheduler,)
__lowerCamelCase : int = (("eta", 0.0), ("num_inference_steps", 50))
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
A : int = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""clip_sample""": True,
}
config.update(**lowerCamelCase__ )
return config
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
A : Dict = self.scheduler_classes[0]
A : str = self.get_scheduler_config(**lowerCamelCase__ )
A : Tuple = scheduler_class(**lowerCamelCase__ )
A , A : Tuple = 10, 0.0
A : Optional[int] = self.dummy_model()
A : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
for t in scheduler.timesteps:
A : Optional[int] = model(lowerCamelCase__, lowerCamelCase__ )
A : str = scheduler.step(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ).prev_sample
return sample
def _lowerCAmelCase ( self ):
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCamelCase__ )
A : Tuple = self.scheduler_classes[0]
A : Optional[Any] = self.get_scheduler_config(steps_offset=1 )
A : List[Any] = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1] ) )
def _lowerCAmelCase ( self ):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCamelCase__, beta_end=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
self.check_over_configs(thresholding=lowerCamelCase__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCamelCase__, prediction_type=lowerCamelCase__, sample_max_value=lowerCamelCase__, )
def _lowerCAmelCase ( self ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500] ):
self.check_over_forward(time_step=lowerCamelCase__, num_inference_steps=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=lowerCamelCase__, eta=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[str] = self.scheduler_classes[0]
A : List[str] = self.get_scheduler_config()
A : Dict = scheduler_class(**lowerCamelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0, 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420, 400 ) - 0.1_4771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980, 960 ) - 0.3_2460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0, 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487, 486 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999, 998 ) - 0.02 ) ) < 1e-5
def _lowerCAmelCase ( self ):
A : int = self.scheduler_classes[0]
A : Any = self.get_scheduler_config()
A : Union[str, Any] = scheduler_class(**lowerCamelCase__ )
A , A : List[str] = 10, 0.0
scheduler.set_timesteps(lowerCamelCase__ )
A : Any = self.dummy_model()
A : Dict = self.dummy_sample_deter
A : Dict = self.dummy_sample_deter + 0.1
A : Tuple = self.dummy_sample_deter - 0.1
A : Tuple = samplea.shape[0]
A : List[Any] = torch.stack([samplea, samplea, samplea], dim=0 )
A : str = torch.arange(lowerCamelCase__ )[0:3, None].repeat(1, lowerCamelCase__ )
A : Any = model(samples.flatten(0, 1 ), timesteps.flatten(0, 1 ) )
A : Any = scheduler.batch_step_no_noise(lowerCamelCase__, timesteps.flatten(0, 1 ), samples.flatten(0, 1 ), lowerCamelCase__ )
A : List[Any] = torch.sum(torch.abs(lowerCamelCase__ ) )
A : Optional[Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1147.7904 ) < 1e-2
assert abs(result_mean.item() - 0.4982 ) < 1e-3
def _lowerCAmelCase ( self ):
A : Dict = self.full_loop()
A : str = torch.sum(torch.abs(lowerCamelCase__ ) )
A : Optional[Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 172.0067 ) < 1e-2
assert abs(result_mean.item() - 0.22_3967 ) < 1e-3
def _lowerCAmelCase ( self ):
A : str = self.full_loop(prediction_type="""v_prediction""" )
A : Any = torch.sum(torch.abs(lowerCamelCase__ ) )
A : Optional[Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 52.5302 ) < 1e-2
assert abs(result_mean.item() - 0.0684 ) < 1e-3
def _lowerCAmelCase ( self ):
# We specify different beta, so that the first alpha is 0.99
A : Any = self.full_loop(set_alpha_to_one=lowerCamelCase__, beta_start=0.01 )
A : Dict = torch.sum(torch.abs(lowerCamelCase__ ) )
A : str = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 149.8295 ) < 1e-2
assert abs(result_mean.item() - 0.1951 ) < 1e-3
def _lowerCAmelCase ( self ):
# We specify different beta, so that the first alpha is 0.99
A : int = self.full_loop(set_alpha_to_one=lowerCamelCase__, beta_start=0.01 )
A : List[Any] = torch.sum(torch.abs(lowerCamelCase__ ) )
A : List[Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 149.0784 ) < 1e-2
assert abs(result_mean.item() - 0.1941 ) < 1e-3
| 116
|
def __UpperCamelCase ( _lowerCAmelCase = 100_0000 ) -> int:
"""simple docstring"""
A : str = limit + 1
A : Tuple = [0] * limit
for first_term in range(1 , _lowerCAmelCase ):
for n in range(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
A : Any = first_term + n / first_term
if common_difference % 4: # d must be divisble by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z>0 and a>d ,also 4d<a
A : Optional[int] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 116
| 1
|
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=13_37 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=13_37 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = split_dict._to_yaml_list()
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
lowercase__ = SplitDict._from_yaml_list(SCREAMING_SNAKE_CASE )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
lowercase__ = None
# the split name of split_dict takes over the name of the split info object
lowercase__ = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'''split_info''' , [SplitInfo(), SplitInfo(dataset_name=SCREAMING_SNAKE_CASE ), SplitInfo(dataset_name='''my_dataset''' )] )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = asdict(SplitDict({'''train''': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 93
|
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCAmelCase = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
lowerCAmelCase = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
lowerCAmelCase = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
def lowerCamelCase_ ( self: List[str] ) -> Tuple:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Any , UpperCamelCase_: int ) -> List[str]:
"""simple docstring"""
lowercase__ = 0.0
for i, j in zip(UpperCamelCase_ , UpperCamelCase_ ):
n_correct += 1.0 if math_equivalence.is_equiv(UpperCamelCase_ , UpperCamelCase_ ) else 0.0
lowercase__ = n_correct / len(UpperCamelCase_ )
return {
"accuracy": accuracy,
}
| 93
| 1
|
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=5 ) -> Optional[Any]:
'''simple docstring'''
assert masked_input.count('''<mask>''' ) == 1
UpperCAmelCase = torch.tensor(tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) ).unsqueeze(0 ) # Batch size 1
UpperCAmelCase = model(__lowercase )[0] # The last hidden-state is the first element of the output tuple
UpperCAmelCase = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
UpperCAmelCase = logits[0, masked_index, :]
UpperCAmelCase = logits.softmax(dim=0 )
UpperCAmelCase , UpperCAmelCase = prob.topk(k=__lowercase , dim=0 )
UpperCAmelCase = ''' '''.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(__lowercase ) )] )
UpperCAmelCase = tokenizer.mask_token
UpperCAmelCase = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''' ) ):
UpperCAmelCase = predicted_token_bpe.replace('''\u2581''' , ''' ''' )
if " {0}".format(__lowercase ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(''' {0}'''.format(__lowercase ) , __lowercase ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(__lowercase , __lowercase ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
__A : Any = CamembertTokenizer.from_pretrained("camembert-base")
__A : Optional[Any] = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
__A : int = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 273
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def UpperCAmelCase_ ( __lowercase : str ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = image.size
_UpperCAmelCase , _UpperCAmelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
_UpperCAmelCase = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
_UpperCAmelCase = np.array(__lowercase ).astype(np.floataa ) / 255.0
_UpperCAmelCase = image[None].transpose(0 , 3 , 1 , 2 )
_UpperCAmelCase = torch.from_numpy(__lowercase )
return 2.0 * image - 1.0
class A_ ( lowerCAmelCase_ ):
def __init__( self : Optional[Any] , snake_case_ : VQModel , snake_case_ : UNetaDModel , snake_case_ : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
super().__init__()
self.register_modules(vqvae=snake_case_ , unet=snake_case_ , scheduler=snake_case_ )
@torch.no_grad()
def __call__( self : Any , snake_case_ : Union[torch.Tensor, PIL.Image.Image] = None , snake_case_ : Optional[int] = 1 , snake_case_ : Optional[int] = 1_0_0 , snake_case_ : Optional[float] = 0.0 , snake_case_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case_ : Optional[str] = "pil" , snake_case_ : bool = True , ):
if isinstance(snake_case_ , PIL.Image.Image ):
_UpperCAmelCase = 1
elif isinstance(snake_case_ , torch.Tensor ):
_UpperCAmelCase = image.shape[0]
else:
raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(snake_case_ )}' )
if isinstance(snake_case_ , PIL.Image.Image ):
_UpperCAmelCase = preprocess(snake_case_ )
_UpperCAmelCase , _UpperCAmelCase = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
_UpperCAmelCase = (batch_size, self.unet.config.in_channels // 2, height, width)
_UpperCAmelCase = next(self.unet.parameters() ).dtype
_UpperCAmelCase = randn_tensor(snake_case_ , generator=snake_case_ , device=self.device , dtype=snake_case_ )
_UpperCAmelCase = image.to(device=self.device , dtype=snake_case_ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(snake_case_ , device=self.device )
_UpperCAmelCase = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
_UpperCAmelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_UpperCAmelCase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_UpperCAmelCase = {}
if accepts_eta:
_UpperCAmelCase = eta
for t in self.progress_bar(snake_case_ ):
# concat latents and low resolution image in the channel dimension.
_UpperCAmelCase = torch.cat([latents, image] , dim=1 )
_UpperCAmelCase = self.scheduler.scale_model_input(snake_case_ , snake_case_ )
# predict the noise residual
_UpperCAmelCase = self.unet(snake_case_ , snake_case_ ).sample
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
# decode the image latents with the VQVAE
_UpperCAmelCase = self.vqvae.decode(snake_case_ ).sample
_UpperCAmelCase = torch.clamp(snake_case_ , -1.0 , 1.0 )
_UpperCAmelCase = image / 2 + 0.5
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(snake_case_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case_ )
| 22
| 0
|
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = '▁'
__UpperCAmelCase = {'vocab_file': 'vocab.txt', 'sentencepiece_model_ckpt': 'sentencepiece.bpe.model'}
__UpperCAmelCase = {
'sentencepiece_model_file': 'sentencepiece.bpe.model',
'vocab_file': 'vocab.txt',
}
__UpperCAmelCase = {
'vocab_file': {
'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
},
'sentencepiece_model_file': {
'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
},
}
__UpperCAmelCase = {
'ernie-m-base': 5_14,
'ernie-m-large': 5_14,
}
__UpperCAmelCase = {
'ernie-m-base': {'do_lower_case': False},
'ernie-m-large': {'do_lower_case': False},
}
class __a ( __UpperCamelCase ):
__snake_case : List[str] = ["input_ids"]
__snake_case : Tuple = VOCAB_FILES_NAMES
__snake_case : str = PRETRAINED_INIT_CONFIGURATION
__snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : List[str] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : str = RESOURCE_FILES_NAMES
def __init__( self : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : List[str]=False , UpperCAmelCase : str="utf8" , UpperCAmelCase : str="[UNK]" , UpperCAmelCase : str="[SEP]" , UpperCAmelCase : int="[PAD]" , UpperCAmelCase : Optional[Any]="[CLS]" , UpperCAmelCase : Optional[Any]="[MASK]" , UpperCAmelCase : Optional[Dict[str, Any]] = None , **UpperCAmelCase : str , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
lowerCAmelCase_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , vocab_file=UpperCAmelCase , encoding=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , )
lowerCAmelCase_ : List[Any] = do_lower_case
lowerCAmelCase_ : Union[str, Any] = sentencepiece_model_ckpt
lowerCAmelCase_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowerCAmelCase_ : Tuple = self.load_vocab(filepath=UpperCAmelCase )
else:
lowerCAmelCase_ : int = {self.sp_model.id_to_piece(UpperCAmelCase ): id for id in range(self.sp_model.get_piece_size() )}
lowerCAmelCase_ : Dict = {v: k for k, v in self.vocab.items()}
def A ( self : Dict , UpperCAmelCase : Optional[int] ):
if text is None:
return None
lowerCAmelCase_ : List[str] = self.tokenize(UpperCAmelCase )
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = """""", []
for i, ch in enumerate(UpperCAmelCase ):
if ch in self.SP_CHAR_MAPPING:
lowerCAmelCase_ : Union[str, Any] = self.SP_CHAR_MAPPING.get(UpperCAmelCase )
else:
lowerCAmelCase_ : Optional[int] = unicodedata.normalize("""NFKC""" , UpperCAmelCase )
if self.is_whitespace(UpperCAmelCase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(UpperCAmelCase ) )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = normalized_text, [], 0
if self.do_lower_case:
lowerCAmelCase_ : str = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowerCAmelCase_ : str = token[1:]
lowerCAmelCase_ : Dict = text[offset:].index(UpperCAmelCase ) + offset
lowerCAmelCase_ : List[str] = start + len(UpperCAmelCase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowerCAmelCase_ : Optional[Any] = end
return token_mapping
@property
def A ( self : Union[str, Any] ):
return len(self.vocab )
def A ( self : Dict ):
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : List[str] ):
lowerCAmelCase_ : Tuple = self.__dict__.copy()
lowerCAmelCase_ : Optional[int] = None
return state
def __setstate__( self : Optional[Any] , UpperCAmelCase : int ):
lowerCAmelCase_ : Tuple = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCAmelCase_ : int = {}
lowerCAmelCase_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def A ( self : Tuple , UpperCAmelCase : str ):
return "".join((self.SP_CHAR_MAPPING.get(UpperCAmelCase , UpperCAmelCase ) for c in text) )
def A ( self : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : int=False , UpperCAmelCase : Union[str, Any]=64 , UpperCAmelCase : Dict=0.1 ):
if self.sp_model_kwargs.get("""enable_sampling""" ) is True:
lowerCAmelCase_ : Optional[Any] = True
if self.sp_model_kwargs.get("""alpha""" ) is not None:
lowerCAmelCase_ : Dict = self.sp_model_kwargs.get("""alpha""" )
if self.sp_model_kwargs.get("""nbest_size""" ) is not None:
lowerCAmelCase_ : int = self.sp_model_kwargs.get("""nbest_size""" )
if not enable_sampling:
lowerCAmelCase_ : Optional[int] = self.sp_model.EncodeAsPieces(UpperCAmelCase )
else:
lowerCAmelCase_ : List[Any] = self.sp_model.SampleEncodeAsPieces(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = []
for pi, piece in enumerate(UpperCAmelCase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(UpperCAmelCase ) and pi != 0:
new_pieces.append(UpperCAmelCase )
continue
else:
continue
lowerCAmelCase_ : str = 0
for i, chunk in enumerate(UpperCAmelCase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(UpperCAmelCase ) or self.is_punct(UpperCAmelCase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(UpperCAmelCase )
lowerCAmelCase_ : List[str] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase_ : List[str] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase_ : Union[str, Any] = i
if len(UpperCAmelCase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def A ( self : str , UpperCAmelCase : Union[str, Any] ):
lowerCAmelCase_ : Any = """""".join(UpperCAmelCase ).replace(UpperCAmelCase , """ """ ).strip()
return out_string
def A ( self : Any , UpperCAmelCase : int ):
lowerCAmelCase_ : str = self.convert_ids_to_tokens(UpperCAmelCase )
lowerCAmelCase_ : Any = """""".join(UpperCAmelCase ).replace(UpperCAmelCase , """ """ ).strip()
return out_string
def A ( self : List[str] , UpperCAmelCase : int ):
return self.vocab.get(UpperCAmelCase , self.vocab.get(self.unk_token ) )
def A ( self : List[Any] , UpperCAmelCase : Tuple ):
return self.reverse_vocab.get(UpperCAmelCase , self.unk_token )
def A ( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any]=None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase_ : Optional[int] = [self.cls_token_id]
lowerCAmelCase_ : Tuple = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def A ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any=None ):
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def A ( self : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[int]=False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1]
def A ( self : List[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(UpperCAmelCase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(UpperCAmelCase ) + 1) + [1] * (len(UpperCAmelCase ) + 3)
def A ( self : Union[str, Any] , UpperCAmelCase : str ):
if "\u4e00" <= char <= "\u9fff":
return True
return False
def A ( self : Union[str, Any] , UpperCAmelCase : Any ):
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def A ( self : Any , UpperCAmelCase : List[str] ):
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def A ( self : str , UpperCAmelCase : List[str] ):
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(UpperCAmelCase ) == 1:
lowerCAmelCase_ : Union[str, Any] = unicodedata.category(UpperCAmelCase )
if cat == "Zs":
return True
return False
def A ( self : Dict , UpperCAmelCase : Dict ):
lowerCAmelCase_ : List[Any] = {}
with io.open(UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f:
for index, line in enumerate(UpperCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = line.rstrip("""\n""" )
lowerCAmelCase_ : str = int(UpperCAmelCase )
return token_to_idx
def A ( self : Any , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
lowerCAmelCase_ : Dict = 0
if os.path.isdir(UpperCAmelCase ):
lowerCAmelCase_ : List[Any] = os.path.join(
UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
lowerCAmelCase_ : List[str] = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
with open(UpperCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
""" Please check that the vocabulary is not corrupted!""" )
lowerCAmelCase_ : List[str] = token_index
writer.write(token + """\n""" )
index += 1
lowerCAmelCase_ : Optional[Any] = os.path.join(UpperCAmelCase , """sentencepiece.bpe.model""" )
with open(UpperCAmelCase , """wb""" ) as fi:
lowerCAmelCase_ : int = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase )
return (vocab_file,)
| 28
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __a ( unittest.TestCase ):
def A ( self : List[Any] ):
lowerCAmelCase_ : Dict = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
lowerCAmelCase_ : Optional[Any] = Vector()
def A ( self : List[str] ):
lowerCAmelCase_ : Tuple = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(UpperCAmelCase ) , """(0,0,0,0,0,1)""" )
def A ( self : Any ):
lowerCAmelCase_ : Union[str, Any] = Vector([1, 2, 3, 4] )
self.assertEqual(len(UpperCAmelCase ) , 4 )
def A ( self : Dict ):
lowerCAmelCase_ : Dict = Vector([1, 2] )
lowerCAmelCase_ : str = Vector([1, 2, 3, 4, 5] )
lowerCAmelCase_ : Optional[int] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
lowerCAmelCase_ : Dict = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Optional[int] = Vector([1, 2, 3] )
lowerCAmelCase_ : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Optional[Any] = Vector([1, 2, 3] )
lowerCAmelCase_ : Dict = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def A ( self : Union[str, Any] ):
lowerCAmelCase_ : Dict = Vector([1, 2, 3] )
lowerCAmelCase_ : Optional[int] = Vector([2, -1, 4] ) # for test of dot product
lowerCAmelCase_ : str = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def A ( self : List[str] ):
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def A ( self : Tuple ):
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Optional[Any] = Vector([1, 2, 3] )
lowerCAmelCase_ : Union[str, Any] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , UpperCAmelCase , UpperCAmelCase ) ) , """(3,4,7)""" )
def A ( self : Optional[int] ):
lowerCAmelCase_ : List[Any] = Vector([1, 0, 0, 0, 0, 0] )
lowerCAmelCase_ : int = x.copy()
self.assertEqual(str(UpperCAmelCase ) , str(UpperCAmelCase ) )
def A ( self : Union[str, Any] ):
lowerCAmelCase_ : Union[str, Any] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(UpperCAmelCase ) , """(0,1,0)""" )
def A ( self : Any ):
lowerCAmelCase_ : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCAmelCase ) )
def A ( self : Optional[int] ):
lowerCAmelCase_ : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase_ : List[str] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(UpperCAmelCase , UpperCAmelCase ) )
def A ( self : Tuple ):
lowerCAmelCase_ : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase_ : Union[str, Any] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(UpperCAmelCase , UpperCAmelCase ) )
def A ( self : Optional[int] ):
lowerCAmelCase_ : Optional[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def A ( self : Optional[int] ):
lowerCAmelCase_ : Dict = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
lowerCAmelCase_ : Any = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def A ( self : Tuple ):
lowerCAmelCase_ : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCAmelCase ) )
def A ( self : Optional[int] ):
lowerCAmelCase_ : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
def A ( self : Dict ):
lowerCAmelCase_ : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase_ : Optional[int] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def A ( self : Union[str, Any] ):
lowerCAmelCase_ : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase_ : Optional[int] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def A ( self : Optional[int] ):
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 28
| 1
|
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> list:
'''simple docstring'''
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(__UpperCAmelCase ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 138
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ : str = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : int = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Dict = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 225
| 0
|
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ : List[Any] = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
lowerCAmelCase_ : int = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = {
"""word_embeddings.weight""": """word_embeddings.weight""",
"""word_embeddings.norm.weight""": """word_embeddings_layernorm.weight""",
"""word_embeddings.norm.bias""": """word_embeddings_layernorm.bias""",
"""weight""": """ln_f.weight""",
"""bias""": """ln_f.bias""",
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
UpperCAmelCase = int(re.match(r""".*layer_(\d*).*""" , lowerCAmelCase )[1] )
layer_number -= 3
return F'''h.{layer_number}.''' + key
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
if dtype == torch.bool:
return 1 / 8
UpperCAmelCase = re.search(r"""[^\d](\d+)$""" , str(lowerCAmelCase ) )
if bit_search is None:
raise ValueError(F'''`dtype` is not a valid dtype: {dtype}.''' )
UpperCAmelCase = int(bit_search.groups()[0] )
return bit_size // 8
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
# Construct model
if bloom_config_file == "":
UpperCAmelCase = BloomConfig()
else:
UpperCAmelCase = BloomConfig.from_json_file(lowerCAmelCase )
if shard_model:
UpperCAmelCase = os.listdir(lowerCAmelCase )
UpperCAmelCase = sorted(filter(lambda lowerCAmelCase : s.startswith("""layer""" ) and "model_00" in s , lowerCAmelCase ) )
UpperCAmelCase = {"""weight_map""": {}, """metadata""": {}}
UpperCAmelCase = 0
UpperCAmelCase = None
UpperCAmelCase = BloomConfig()
for j, file in enumerate(lowerCAmelCase ):
print("""Processing file: {}""".format(lowerCAmelCase ) )
UpperCAmelCase = None
for i in range(lowerCAmelCase ):
# load all TP files
UpperCAmelCase = file.replace("""model_00""" , F'''model_0{i}''' )
UpperCAmelCase = torch.load(os.path.join(lowerCAmelCase , lowerCAmelCase ) , map_location="""cpu""" )
# Rename keys in the transformers names
UpperCAmelCase = list(temp.keys() )
for key in keys:
UpperCAmelCase = temp.pop(lowerCAmelCase )
if tensors is None:
UpperCAmelCase = temp
else:
for key in tensors.keys():
if any(key.endswith(lowerCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
UpperCAmelCase = torch.cat([tensors[key], temp[key]] , dim=lowerCAmelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(lowerCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase = tensors[key] / pretraining_tp
torch.save(
lowerCAmelCase , os.path.join(
lowerCAmelCase , """pytorch_model_{}-of-{}.bin""".format(str(j + 1 ).zfill(5 ) , str(len(lowerCAmelCase ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
UpperCAmelCase = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
UpperCAmelCase = """pytorch_model_{}-of-{}.bin""".format(
str(j + 1 ).zfill(5 ) , str(len(lowerCAmelCase ) ).zfill(5 ) )
UpperCAmelCase = BloomConfig()
UpperCAmelCase = pytorch_dump_folder_path + """/""" + CONFIG_NAME
UpperCAmelCase = total_size
with open(lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
with open(os.path.join(lowerCAmelCase , WEIGHTS_NAME + """.index.json""" ) , """w""" , encoding="""utf-8""" ) as f:
UpperCAmelCase = json.dumps(lowerCAmelCase , indent=2 , sort_keys=lowerCAmelCase ) + """\n"""
f.write(lowerCAmelCase )
else:
UpperCAmelCase = BloomModel(lowerCAmelCase )
UpperCAmelCase = os.listdir(lowerCAmelCase )
UpperCAmelCase = sorted(filter(lambda lowerCAmelCase : s.startswith("""layer""" ) and "model_00" in s , lowerCAmelCase ) )
UpperCAmelCase = None
for i, file in enumerate(lowerCAmelCase ):
UpperCAmelCase = None
for i in range(lowerCAmelCase ):
# load all TP files
UpperCAmelCase = file.replace("""model_00""" , F'''model_0{i}''' )
UpperCAmelCase = torch.load(os.path.join(lowerCAmelCase , lowerCAmelCase ) , map_location="""cpu""" )
# Rename keys in the transformers names
UpperCAmelCase = list(temp.keys() )
for key in keys:
UpperCAmelCase = temp.pop(lowerCAmelCase )
if tensors is None:
UpperCAmelCase = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(lowerCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
UpperCAmelCase = torch.cat([tensors[key], temp[key]] , dim=lowerCAmelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(lowerCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase = tensors[key] / pretraining_tp
UpperCAmelCase = model.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase )
assert not other_keys.unexpected_keys, F'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
UpperCAmelCase = set(other_keys.missing_keys )
else:
UpperCAmelCase = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
UpperCAmelCase = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
UpperCAmelCase = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(F'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
if config.torch_dtype is not None:
UpperCAmelCase = model.to(config.torch_dtype )
torch.save(model.state_dict() , lowerCAmelCase )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
lowerCAmelCase_ : Optional[Any] = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 248
|
"""simple docstring"""
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(lowerCAmelCase ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 248
| 1
|
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase ( __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = (CMStochasticIterativeScheduler,)
SCREAMING_SNAKE_CASE_ = 1_0
def a_ ( self, **lowerCAmelCase__) -> Tuple:
snake_case_ = {
'num_train_timesteps': 201,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
config.update(**SCREAMING_SNAKE_CASE_)
return config
def a_ ( self) -> Tuple:
snake_case_ = 10
snake_case_ = self.get_scheduler_config()
snake_case_ = self.scheduler_classes[0](**SCREAMING_SNAKE_CASE_)
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_)
snake_case_ = scheduler.timesteps[0]
snake_case_ = scheduler.timesteps[1]
snake_case_ = self.dummy_sample
snake_case_ = 0.1 * sample
snake_case_ = scheduler.step(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_).prev_sample
snake_case_ = scheduler.step(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_).prev_sample
self.assertEqual(output_a.shape, sample.shape)
self.assertEqual(output_a.shape, output_a.shape)
def a_ ( self) -> Optional[int]:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_)
def a_ ( self) -> List[str]:
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=SCREAMING_SNAKE_CASE_)
def a_ ( self) -> Union[str, Any]:
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**SCREAMING_SNAKE_CASE_)
snake_case_ = 1
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_)
snake_case_ = scheduler.timesteps
snake_case_ = torch.manual_seed(0)
snake_case_ = self.dummy_model()
snake_case_ = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(SCREAMING_SNAKE_CASE_):
# 1. scale model input
snake_case_ = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
# 2. predict noise residual
snake_case_ = model(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
# 3. predict previous sample x_t-1
snake_case_ = scheduler.step(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_).prev_sample
snake_case_ = pred_prev_sample
snake_case_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_))
snake_case_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_))
assert abs(result_sum.item() - 192.7614) < 1e-2
assert abs(result_mean.item() - 0.2510) < 1e-3
def a_ ( self) -> Tuple:
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**SCREAMING_SNAKE_CASE_)
snake_case_ = [106, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
snake_case_ = scheduler.timesteps
snake_case_ = torch.manual_seed(0)
snake_case_ = self.dummy_model()
snake_case_ = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
snake_case_ = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
# 2. predict noise residual
snake_case_ = model(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
# 3. predict previous sample x_t-1
snake_case_ = scheduler.step(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_).prev_sample
snake_case_ = pred_prev_sample
snake_case_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_))
snake_case_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_))
assert abs(result_sum.item() - 347.6357) < 1e-2
assert abs(result_mean.item() - 0.4527) < 1e-3
def a_ ( self) -> int:
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**SCREAMING_SNAKE_CASE_)
snake_case_ = [39, 30, 12, 15, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE_, msg='`timesteps` must be in descending order.'):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
def a_ ( self) -> List[str]:
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**SCREAMING_SNAKE_CASE_)
snake_case_ = [39, 30, 12, 1, 0]
snake_case_ = len(SCREAMING_SNAKE_CASE_)
with self.assertRaises(SCREAMING_SNAKE_CASE_, msg='Can only pass one of `num_inference_steps` or `timesteps`.'):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE_, timesteps=SCREAMING_SNAKE_CASE_)
def a_ ( self) -> Dict:
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**SCREAMING_SNAKE_CASE_)
snake_case_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
SCREAMING_SNAKE_CASE_, msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}', ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
| 69
|
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
lowerCamelCase_ = '''0.12''' # assumed parallelism: 8
if is_torch_available():
import torch
def __magic_name__ ( __a : Union[str, Any] , __a : Any , __a : Union[str, Any]=None ):
'''simple docstring'''
if rng is None:
UpperCamelCase__ = random.Random()
UpperCamelCase__ = 1
for dim in shape:
total_dims *= dim
UpperCamelCase__ = []
for _ in range(__a ):
values.append(rng.randint(0 , vocab_size - 1 ) )
UpperCamelCase__ = np.array(__a , dtype=jnp.intaa ).reshape(__a )
return output
def __magic_name__ ( __a : Dict , __a : Tuple=None ):
'''simple docstring'''
UpperCamelCase__ = ids_tensor(__a , vocab_size=2 , rng=__a )
# make sure that at least one token is attended to for each batch
UpperCamelCase__ = 1
return attn_mask
@require_flax
class __A:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = ()
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
UpperCamelCase__ = 2
UpperCamelCase__ = inputs["""input_ids"""].shape[-1] // 2
UpperCamelCase__ = inputs["""input_ids"""][:max_batch_size, :sequence_length]
UpperCamelCase__ = jnp.ones_like(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
UpperCamelCase__ = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
UpperCamelCase__ = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
UpperCamelCase__ = False
UpperCamelCase__ = max_length
UpperCamelCase__ = 0
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCamelCase__ = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = pt_model_class(SCREAMING_SNAKE_CASE_ ).eval()
UpperCamelCase__ = load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE_ , flax_model.params )
UpperCamelCase__ = flax_model.generate(SCREAMING_SNAKE_CASE_ ).sequences
UpperCamelCase__ = pt_model.generate(torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
UpperCamelCase__ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
UpperCamelCase__ = False
UpperCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
UpperCamelCase__ = True
UpperCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
UpperCamelCase__ = False
UpperCamelCase__ = max_length
UpperCamelCase__ = 2
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
UpperCamelCase__ = False
UpperCamelCase__ = max_length
UpperCamelCase__ = 2
UpperCamelCase__ = 2
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
UpperCamelCase__ = True
UpperCamelCase__ = max_length
UpperCamelCase__ = 0.8
UpperCamelCase__ = 10
UpperCamelCase__ = 0.3
UpperCamelCase__ = 1
UpperCamelCase__ = 8
UpperCamelCase__ = 9
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
UpperCamelCase__ = max_length
UpperCamelCase__ = 1
UpperCamelCase__ = 8
UpperCamelCase__ = 9
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
UpperCamelCase__ = max_length
UpperCamelCase__ = 2
UpperCamelCase__ = 1
UpperCamelCase__ = 8
UpperCamelCase__ = 9
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCamelCase__ = attention_mask.at[(0, 0)].set(0 )
UpperCamelCase__ = False
UpperCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCamelCase__ = attention_mask.at[(0, 0)].set(0 )
UpperCamelCase__ = True
UpperCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCamelCase__ = attention_mask.at[(0, 0)].set(0 )
UpperCamelCase__ = 2
UpperCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class __A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ (self ):
UpperCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
UpperCamelCase__ = FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
UpperCamelCase__ = """Hello world"""
UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors="""np""" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , """do_samples""" ):
model.generate(SCREAMING_SNAKE_CASE_ , do_samples=SCREAMING_SNAKE_CASE_ )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , """foo""" ):
UpperCamelCase__ = {"""foo""": """bar"""}
model.generate(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
| 244
| 0
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=__lowercase )
class lowerCAmelCase__ ( __lowercase ):
a__ : str = field(default="""audio-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
a__ : ClassVar[Features] = Features({"""audio""": Audio()} )
a__ : ClassVar[Features] = Features({"""labels""": ClassLabel} )
a__ : str = "audio"
a__ : str = "labels"
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[Any]:
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , SCREAMING_SNAKE_CASE__ ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
__lowerCamelCase = copy.deepcopy(self )
__lowerCamelCase = self.label_schema.copy()
__lowerCamelCase = features[self.label_column]
__lowerCamelCase = label_schema
return task_template
@property
def __A ( self : Union[str, Any] ) -> Dict[str, str]:
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 364
|
from __future__ import annotations
def __magic_name__ ( __lowerCAmelCase : list , __lowerCAmelCase : int | None = None , __lowerCAmelCase : int | None = None ) -> None:
if start is None:
__lowerCamelCase = 0
if end is None:
__lowerCamelCase = len(__lowerCAmelCase ) - 1
if start >= end:
return
__lowerCamelCase = (start + end) // 2
slowsort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
slowsort(__lowerCAmelCase , mid + 1 , __lowerCAmelCase )
if sequence[end] < sequence[mid]:
__lowerCamelCase , __lowerCamelCase = sequence[mid], sequence[end]
slowsort(__lowerCAmelCase , __lowerCAmelCase , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 339
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class lowerCamelCase__ ( lowerCAmelCase):
SCREAMING_SNAKE_CASE__ = '''mgp-str'''
def __init__(self , UpperCAmelCase=[3_2, 1_2_8] , UpperCAmelCase=4 , UpperCAmelCase=3 , UpperCAmelCase=2_7 , UpperCAmelCase=3_8 , UpperCAmelCase=5_0_2_5_7 , UpperCAmelCase=3_0_5_2_2 , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=4.0 , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=1e-5 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=False , UpperCAmelCase=0.02 , **UpperCAmelCase , ) -> Optional[Any]:
super().__init__(**UpperCAmelCase )
_lowercase =image_size
_lowercase =patch_size
_lowercase =num_channels
_lowercase =max_token_length
_lowercase =num_character_labels
_lowercase =num_bpe_labels
_lowercase =num_wordpiece_labels
_lowercase =hidden_size
_lowercase =num_hidden_layers
_lowercase =num_attention_heads
_lowercase =mlp_ratio
_lowercase =distilled
_lowercase =layer_norm_eps
_lowercase =drop_rate
_lowercase =qkv_bias
_lowercase =attn_drop_rate
_lowercase =drop_path_rate
_lowercase =output_aa_attentions
_lowercase =initializer_range
| 5
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = (DDPMScheduler,)
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , **snake_case :str ):
'''simple docstring'''
A_ : Dict = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**snake_case )
return config
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=snake_case , beta_end=snake_case )
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=snake_case )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
self.check_over_configs(thresholding=snake_case )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=snake_case , prediction_type=snake_case , sample_max_value=snake_case , )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Tuple = self.scheduler_classes[0]
A_ : List[str] = self.get_scheduler_config()
A_ : List[str] = scheduler_class(**snake_case )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : int = self.scheduler_classes[0]
A_ : List[str] = self.get_scheduler_config()
A_ : int = scheduler_class(**snake_case )
A_ : Tuple = len(snake_case )
A_ : List[str] = self.dummy_model()
A_ : Optional[Any] = self.dummy_sample_deter
A_ : List[str] = torch.manual_seed(0 )
for t in reversed(range(snake_case ) ):
# 1. predict noise residual
A_ : Tuple = model(snake_case , snake_case )
# 2. predict previous mean of sample x_t-1
A_ : Dict = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
A_ : Optional[int] = pred_prev_sample
A_ : Tuple = torch.sum(torch.abs(snake_case ) )
A_ : str = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : Optional[int] = self.scheduler_classes[0]
A_ : int = self.get_scheduler_config(prediction_type="v_prediction" )
A_ : List[str] = scheduler_class(**snake_case )
A_ : int = len(snake_case )
A_ : Dict = self.dummy_model()
A_ : str = self.dummy_sample_deter
A_ : Any = torch.manual_seed(0 )
for t in reversed(range(snake_case ) ):
# 1. predict noise residual
A_ : Optional[int] = model(snake_case , snake_case )
# 2. predict previous mean of sample x_t-1
A_ : Tuple = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
A_ : List[str] = pred_prev_sample
A_ : Optional[Any] = torch.sum(torch.abs(snake_case ) )
A_ : List[str] = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : str = self.scheduler_classes[0]
A_ : Optional[Any] = self.get_scheduler_config()
A_ : Dict = scheduler_class(**snake_case )
A_ : Optional[int] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=snake_case )
A_ : Optional[int] = scheduler.timesteps
for i, timestep in enumerate(snake_case ):
if i == len(snake_case ) - 1:
A_ : str = -1
else:
A_ : List[str] = timesteps[i + 1]
A_ : Optional[int] = scheduler.previous_timestep(snake_case )
A_ : List[str] = prev_t.item()
self.assertEqual(snake_case , snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : int = self.get_scheduler_config()
A_ : Tuple = scheduler_class(**snake_case )
A_ : List[str] = [100, 87, 50, 51, 0]
with self.assertRaises(snake_case , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Any = self.scheduler_classes[0]
A_ : Union[str, Any] = self.get_scheduler_config()
A_ : Optional[int] = scheduler_class(**snake_case )
A_ : Union[str, Any] = [100, 87, 50, 1, 0]
A_ : Optional[int] = len(snake_case )
with self.assertRaises(snake_case , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=snake_case , timesteps=snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Union[str, Any] = self.scheduler_classes[0]
A_ : Optional[Any] = self.get_scheduler_config()
A_ : Optional[int] = scheduler_class(**snake_case )
A_ : Optional[int] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
snake_case , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
scheduler.set_timesteps(timesteps=snake_case )
| 300
| 0
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
UpperCAmelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__)
class __lowercase ( folder_based_builder.FolderBasedBuilderConfig ):
"""simple docstring"""
UpperCamelCase : bool = None
UpperCamelCase : bool = None
class __lowercase ( folder_based_builder.FolderBasedBuilder ):
"""simple docstring"""
UpperCamelCase : List[str] = datasets.Audio()
UpperCamelCase : List[str] = "audio"
UpperCamelCase : Dict = AudioFolderConfig
UpperCamelCase : List[str] # definition at the bottom of the script
UpperCamelCase : Any = AudioClassification(audio_column="audio" , label_column="label" )
UpperCAmelCase : Tuple = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
UpperCAmelCase : Optional[Any] = AUDIO_EXTENSIONS
| 353
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : int = {"vocab_file": "spiece.model"}
UpperCAmelCase : Optional[int] = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class __lowercase ( a_ ):
"""simple docstring"""
def __init__( self , A , A=False , A=True , A=False , A="<s>" , A="</s>" , A="<unk>" , A="<sep>" , A="<pad>" , A="<cls>" , A="<mask>" , A=["<eop>", "<eod>"] , A = None , **A , ) -> None:
'''simple docstring'''
lowerCamelCase = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , additional_special_tokens=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
lowerCamelCase = 3
lowerCamelCase = do_lower_case
lowerCamelCase = remove_space
lowerCamelCase = keep_accents
lowerCamelCase = vocab_file
lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
lowerCamelCase = jieba
lowerCamelCase = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def __A ( self ) -> int:
'''simple docstring'''
return len(self.sp_model )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
'''simple docstring'''
lowerCamelCase = self.__dict__.copy()
lowerCamelCase = None
return state
def __setstate__( self , A ) -> int:
'''simple docstring'''
lowerCamelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCamelCase = {}
lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __A ( self , A ) -> Any:
'''simple docstring'''
if self.remove_space:
lowerCamelCase = """ """.join(inputs.strip().split() )
else:
lowerCamelCase = inputs
lowerCamelCase = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
lowerCamelCase = unicodedata.normalize("""NFKD""" , A )
lowerCamelCase = """""".join([c for c in outputs if not unicodedata.combining(A )] )
if self.do_lower_case:
lowerCamelCase = outputs.lower()
return outputs
def __A ( self , A ) -> List[str]:
'''simple docstring'''
lowerCamelCase = self.preprocess_text(A )
lowerCamelCase = self.sp_model.encode(A , out_type=A )
lowerCamelCase = []
for piece in pieces:
if len(A ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
lowerCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(A , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase = cur_pieces[1:]
else:
lowerCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(A )
else:
new_pieces.append(A )
return new_pieces
def __A ( self , A ) -> Union[str, Any]:
'''simple docstring'''
return self.sp_model.PieceToId(A )
def __A ( self , A ) -> int:
'''simple docstring'''
return self.sp_model.IdToPiece(A )
def __A ( self , A ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = """""".join(A ).replace(A , """ """ ).strip()
return out_string
def __A ( self , A , A = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase = [self.sep_token_id]
lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __A ( self , A , A = None , A = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is not None:
return ([0] * len(A )) + [1] + ([0] * len(A )) + [1, 1]
return ([0] * len(A )) + [1, 1]
def __A ( self , A , A = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase = [self.sep_token_id]
lowerCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __A ( self , A , A = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(A ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
def __A ( self , *A , **A ) -> int:
'''simple docstring'''
lowerCamelCase = super()._decode(*A , **A )
lowerCamelCase = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
| 66
| 0
|
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[Any] = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] ) -> List[Any]:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(f'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' )
if tokenizer_name is None:
__lowerCamelCase = TOKENIZER_CLASSES
else:
__lowerCamelCase = {tokenizer_name: getattr(__lowerCAmelCase , tokenizer_name + '''Fast''' )}
logger.info(f'''Loading tokenizer classes: {tokenizer_names}''' )
for tokenizer_name in tokenizer_names:
__lowerCamelCase = TOKENIZER_CLASSES[tokenizer_name]
__lowerCamelCase = True
if checkpoint_name is None:
__lowerCamelCase = list(tokenizer_class.max_model_input_sizes.keys() )
else:
__lowerCamelCase = [checkpoint_name]
logger.info(f'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' )
for checkpoint in checkpoint_names:
logger.info(f'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' )
# Load tokenizer
__lowerCamelCase = tokenizer_class.from_pretrained(__lowerCAmelCase , force_download=__lowerCAmelCase )
# Save fast tokenizer
logger.info(f'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' )
# For organization names we create sub-directories
if "/" in checkpoint:
__lowerCamelCase , __lowerCamelCase = checkpoint.split('''/''' )
__lowerCamelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
elif add_prefix:
__lowerCamelCase = checkpoint
__lowerCamelCase = dump_path
else:
__lowerCamelCase = None
__lowerCamelCase = dump_path
logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
__lowerCamelCase = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
__lowerCamelCase = file_path.split(__lowerCAmelCase )[-1][0]
if next_char == "/":
__lowerCamelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = None
logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
__lowerCamelCase = tokenizer.save_pretrained(
__lowerCAmelCase , legacy_format=__lowerCAmelCase , filename_prefix=__lowerCAmelCase )
logger.info(f'''=> File names {file_names}''' )
for file_name in file_names:
if not file_name.endswith('''tokenizer.json''' ):
os.remove(__lowerCAmelCase )
logger.info(f'''=> removing {file_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
F'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
SCREAMING_SNAKE_CASE__ : Dict = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 270
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ : List[Any] = TypeVar("T")
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
return (position - 1) // 2
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
return (2 * position) + 1
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
return (2 * position) + 2
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : List[str] ) -> None:
__lowerCamelCase = []
__lowerCamelCase = {}
__lowerCamelCase = 0
def __len__( self : Optional[int] ) -> int:
return self.elements
def __repr__( self : Optional[int] ) -> str:
return str(self.heap )
def __A ( self : Union[str, Any] ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def __A ( self : str , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
__lowerCamelCase = self.elements
self.elements += 1
self._bubble_up(SCREAMING_SNAKE_CASE__ )
def __A ( self : Any ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
__lowerCamelCase , __lowerCamelCase = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__lowerCamelCase , __lowerCamelCase = self.heap[0]
self._bubble_down(SCREAMING_SNAKE_CASE__ )
return elem
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ) -> None:
# Update the weight of the given key
__lowerCamelCase = self.position_map[elem]
__lowerCamelCase = (elem, weight)
if position > 0:
__lowerCamelCase = get_parent_position(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase , __lowerCamelCase = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(SCREAMING_SNAKE_CASE__ )
else:
self._bubble_down(SCREAMING_SNAKE_CASE__ )
else:
self._bubble_down(SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : T ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
__lowerCamelCase = self.position_map[elem]
if curr_pos == 0:
return None
__lowerCamelCase = get_parent_position(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase , __lowerCamelCase = self.heap[curr_pos]
__lowerCamelCase , __lowerCamelCase = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_up(SCREAMING_SNAKE_CASE__ )
return None
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : T ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
__lowerCamelCase = self.position_map[elem]
__lowerCamelCase , __lowerCamelCase = self.heap[curr_pos]
__lowerCamelCase = get_child_left_position(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = get_child_right_position(SCREAMING_SNAKE_CASE__ )
if child_left_position < self.elements and child_right_position < self.elements:
__lowerCamelCase , __lowerCamelCase = self.heap[child_left_position]
__lowerCamelCase , __lowerCamelCase = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
if child_left_position < self.elements:
__lowerCamelCase , __lowerCamelCase = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
else:
return None
if child_right_position < self.elements:
__lowerCamelCase , __lowerCamelCase = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
return None
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> None:
# Swap the nodes at the given positions
__lowerCamelCase = self.heap[nodea_pos][0]
__lowerCamelCase = self.heap[nodea_pos][0]
__lowerCamelCase , __lowerCamelCase = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
__lowerCamelCase = nodea_pos
__lowerCamelCase = nodea_pos
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : Tuple ) -> None:
__lowerCamelCase = {}
__lowerCamelCase = 0
def __repr__( self : Optional[int] ) -> str:
return str(self.connections )
def __len__( self : List[str] ) -> int:
return self.nodes
def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : T ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
__lowerCamelCase = {}
self.nodes += 1
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(SCREAMING_SNAKE_CASE__ )
self.add_node(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = weight
__lowerCamelCase = weight
def __magic_name__ ( __lowerCAmelCase : GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
__lowerCamelCase = {node: maxsize for node in graph.connections}
__lowerCamelCase = {node: None for node in graph.connections}
__lowerCamelCase = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(__lowerCAmelCase , __lowerCAmelCase )
if priority_queue.is_empty():
return dist, parent
# initialization
__lowerCamelCase = priority_queue.extract_min()
__lowerCamelCase = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__lowerCamelCase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__lowerCAmelCase , dist[neighbour] )
__lowerCamelCase = node
# running prim's algorithm
while not priority_queue.is_empty():
__lowerCamelCase = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__lowerCamelCase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__lowerCAmelCase , dist[neighbour] )
__lowerCamelCase = node
return dist, parent
| 270
| 1
|
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : str, __A : Optional[int] ):
if isinstance(__A, __A ):
UpperCAmelCase : str = [label.strip() for label in labels.split(''',''' ) if label.strip()]
return labels
def __call__( self : Any, __A : Any, __A : Tuple, __A : Dict ):
if len(__A ) == 0 or len(__A ) == 0:
raise ValueError('''You must include at least one label and at least one sequence.''' )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
'''The provided hypothesis_template "{}" was not able to be formatted with the target labels. '''
'''Make sure the passed template includes formatting syntax such as {{}} where the label should go.'''
).format(__A ) )
if isinstance(__A, __A ):
UpperCAmelCase : int = [sequences]
UpperCAmelCase : Dict = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(__A )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(lowerCamelCase__ )
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : Union[str, Any], __A : List[str]=ZeroShotClassificationArgumentHandler(), *__A : Optional[int], **__A : Optional[Any] ):
UpperCAmelCase : Optional[Any] = args_parser
super().__init__(*__A, **__A )
if self.entailment_id == -1:
logger.warning(
'''Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '''
'''-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.''' )
@property
def __magic_name__ ( self : Union[str, Any] ):
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith('''entail''' ):
return ind
return -1
def __magic_name__ ( self : str, __A : int, __A : List[Any]=True, __A : Any=True, __A : List[str]=TruncationStrategy.ONLY_FIRST, **__A : List[Any] ):
UpperCAmelCase : Dict = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
'''Tokenizer was not supporting padding necessary for zero-shot, attempting to use '''
''' `pad_token=eos_token`''' )
UpperCAmelCase : Optional[int] = self.tokenizer.eos_token
try:
UpperCAmelCase : int = self.tokenizer(
__A, add_special_tokens=__A, return_tensors=__A, padding=__A, truncation=__A, )
except Exception as e:
if "too short" in str(__A ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
UpperCAmelCase : Dict = self.tokenizer(
__A, add_special_tokens=__A, return_tensors=__A, padding=__A, truncation=TruncationStrategy.DO_NOT_TRUNCATE, )
else:
raise e
return inputs
def __magic_name__ ( self : str, **__A : int ):
if kwargs.get('''multi_class''', __A ) is not None:
UpperCAmelCase : Any = kwargs['''multi_class''']
logger.warning(
'''The `multi_class` argument has been deprecated and renamed to `multi_label`. '''
'''`multi_class` will be removed in a future version of Transformers.''' )
UpperCAmelCase : Tuple = {}
if "candidate_labels" in kwargs:
UpperCAmelCase : List[str] = self._args_parser._parse_labels(kwargs['''candidate_labels'''] )
if "hypothesis_template" in kwargs:
UpperCAmelCase : int = kwargs['''hypothesis_template''']
UpperCAmelCase : int = {}
if "multi_label" in kwargs:
UpperCAmelCase : List[str] = kwargs['''multi_label''']
return preprocess_params, {}, postprocess_params
def __call__( self : List[Any], __A : Union[str, List[str]], *__A : Tuple, **__A : Union[str, Any], ):
if len(__A ) == 0:
pass
elif len(__A ) == 1 and "candidate_labels" not in kwargs:
UpperCAmelCase : int = args[0]
else:
raise ValueError(F'''Unable to understand extra arguments {args}''' )
return super().__call__(__A, **__A )
def __magic_name__ ( self : List[Any], __A : Tuple, __A : Optional[Any]=None, __A : List[str]="This example is {}." ):
UpperCAmelCase , UpperCAmelCase : List[Any] = self._args_parser(__A, __A, __A )
for i, (candidate_label, sequence_pair) in enumerate(zip(__A, __A ) ):
UpperCAmelCase : Dict = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(__A ) - 1,
**model_input,
}
def __magic_name__ ( self : Dict, __A : Dict ):
UpperCAmelCase : str = inputs['''candidate_label''']
UpperCAmelCase : List[Any] = inputs['''sequence''']
UpperCAmelCase : int = {k: inputs[k] for k in self.tokenizer.model_input_names}
UpperCAmelCase : List[Any] = self.model(**__A )
UpperCAmelCase : List[str] = {
'''candidate_label''': candidate_label,
'''sequence''': sequence,
'''is_last''': inputs['''is_last'''],
**outputs,
}
return model_outputs
def __magic_name__ ( self : List[str], __A : Optional[Any], __A : int=False ):
UpperCAmelCase : Union[str, Any] = [outputs['''candidate_label'''] for outputs in model_outputs]
UpperCAmelCase : Union[str, Any] = [outputs['''sequence'''] for outputs in model_outputs]
UpperCAmelCase : Any = np.concatenate([output['''logits'''].numpy() for output in model_outputs] )
UpperCAmelCase : List[Any] = logits.shape[0]
UpperCAmelCase : Union[str, Any] = len(__A )
UpperCAmelCase : str = N // n
UpperCAmelCase : Dict = logits.reshape((num_sequences, n, -1) )
if multi_label or len(__A ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
UpperCAmelCase : Union[str, Any] = self.entailment_id
UpperCAmelCase : List[str] = -1 if entailment_id == 0 else 0
UpperCAmelCase : Tuple = reshaped_outputs[..., [contradiction_id, entailment_id]]
UpperCAmelCase : Dict = np.exp(__A ) / np.exp(__A ).sum(-1, keepdims=__A )
UpperCAmelCase : Dict = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
UpperCAmelCase : List[str] = reshaped_outputs[..., self.entailment_id]
UpperCAmelCase : str = np.exp(__A ) / np.exp(__A ).sum(-1, keepdims=__A )
UpperCAmelCase : Any = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
| 99
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_lowerCamelCase : Optional[Any] = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def a__ ( ) -> List[Any]:
UpperCAmelCase : Dict = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
UpperCAmelCase : List[Any] = get_sagemaker_input()
else:
UpperCAmelCase : Optional[Any] = get_cluster_input()
return config
def a__ ( UpperCAmelCase : Union[str, Any]=None ) -> List[Any]:
if subparsers is not None:
UpperCAmelCase : Optional[Any] = subparsers.add_parser('''config''' , description=UpperCAmelCase )
else:
UpperCAmelCase : List[str] = argparse.ArgumentParser('''Accelerate config command''' , description=UpperCAmelCase )
parser.add_argument(
'''--config_file''' , default=UpperCAmelCase , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=UpperCAmelCase )
return parser
def a__ ( UpperCAmelCase : List[Any] ) -> Optional[int]:
UpperCAmelCase : str = get_user_input()
if args.config_file is not None:
UpperCAmelCase : Any = args.config_file
else:
if not os.path.isdir(UpperCAmelCase ):
os.makedirs(UpperCAmelCase )
UpperCAmelCase : List[str] = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(UpperCAmelCase )
else:
config.to_yaml_file(UpperCAmelCase )
print(f'''accelerate configuration saved at {config_file}''' )
def a__ ( ) -> Dict:
UpperCAmelCase : str = config_command_parser()
UpperCAmelCase : str = parser.parse_args()
config_command(UpperCAmelCase )
if __name__ == "__main__":
main()
| 99
| 1
|
from __future__ import annotations
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None ):
if start is None:
lowercase :Any = 0
if end is None:
lowercase :Any = len(lowerCamelCase ) - 1
if start >= end:
return
lowercase :Optional[Any] = (start + end) // 2
slowsort(lowerCamelCase, lowerCamelCase, lowerCamelCase )
slowsort(lowerCamelCase, mid + 1, lowerCamelCase )
if sequence[end] < sequence[mid]:
lowercase , lowercase :List[str] = sequence[mid], sequence[end]
slowsort(lowerCamelCase, lowerCamelCase, end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 236
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : List[str] = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : int = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Any = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 236
| 1
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__A = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : List[str] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any]) ->None:
'''simple docstring'''
warnings.warn(
"The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use GLPNImageProcessor instead." , UpperCAmelCase_ , )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
| 273
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__A = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__A = direct_transformers_import(PATH_TO_TRANSFORMERS)
__A = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__A = re.compile(R"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
__A = {
"DecisionTransformerConfig",
"EncoderDecoderConfig",
"MusicgenConfig",
"RagConfig",
"SpeechEncoderDecoderConfig",
"TimmBackboneConfig",
"VisionEncoderDecoderConfig",
"VisionTextDualEncoderConfig",
"LlamaConfig",
}
def lowerCAmelCase_ ( __a ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__: List[Any] =None
# source code of `config_class`
lowerCamelCase__: List[Any] =inspect.getsource(__a )
lowerCamelCase__: str =_re_checkpoint.findall(__a )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("/" ):
lowerCamelCase__: str =ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
lowerCamelCase__: Optional[int] =F"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
lowerCamelCase__: List[Any] =ckpt_name
break
return checkpoint
def lowerCAmelCase_ ( ) -> Tuple:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =[]
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
lowerCamelCase__: List[Any] =get_checkpoint_from_config_class(__a )
lowerCamelCase__: Tuple =config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__a )
if len(__a ) > 0:
lowerCamelCase__: List[str] ="\n".join(sorted(__a ) )
raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 273
| 1
|
def a ( snake_case__: int = 600_851_475_143 ):
'''simple docstring'''
try:
lowercase_ = int(snake_case__ )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
lowercase_ = 2
lowercase_ = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
lowercase_ = i
while n % i == 0:
lowercase_ = n // i
i += 1
return int(snake_case__ )
if __name__ == "__main__":
print(f"{solution() = }")
| 30
|
from math import factorial
class __a :
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ : Tuple = real
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : Union[str, Any] = [1] * rank
else:
UpperCamelCase__ : int = rank
def __repr__( self : Tuple ):
'''simple docstring'''
return (
F'{self.real}+'
F'{"+".join(str(SCREAMING_SNAKE_CASE )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}'
)
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , SCREAMING_SNAKE_CASE )
def __add__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return Dual(self.real + other , self.duals )
UpperCamelCase__ : Optional[int] = self.duals.copy()
UpperCamelCase__ : Any = other.duals.copy()
if len(SCREAMING_SNAKE_CASE ) > len(SCREAMING_SNAKE_CASE ):
o_dual.extend([1] * (len(SCREAMING_SNAKE_CASE ) - len(SCREAMING_SNAKE_CASE )) )
elif len(SCREAMING_SNAKE_CASE ) < len(SCREAMING_SNAKE_CASE ):
s_dual.extend([1] * (len(SCREAMING_SNAKE_CASE ) - len(SCREAMING_SNAKE_CASE )) )
UpperCamelCase__ : Optional[int] = []
for i in range(len(SCREAMING_SNAKE_CASE ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , SCREAMING_SNAKE_CASE )
_lowerCAmelCase : Dict = __add__
def __sub__( self : Tuple , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
return self + other * -1
def __mul__( self : int , SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : str = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , SCREAMING_SNAKE_CASE )
_lowerCAmelCase : Union[str, Any] = __mul__
def __truediv__( self : List[Any] , SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : str = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , SCREAMING_SNAKE_CASE )
raise ValueError
def __floordiv__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : Dict = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , SCREAMING_SNAKE_CASE )
raise ValueError
def __pow__( self : str , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if n < 0 or isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise ValueError("power must be a positive integer" )
if n == 0:
return 1
if n == 1:
return self
UpperCamelCase__ : str = self
for _ in range(n - 1 ):
x *= self
return x
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str:
if not callable(__lowerCAmelCase ):
raise ValueError("differentiate() requires a function as input for func" )
if not isinstance(__lowerCAmelCase , (float, int) ):
raise ValueError("differentiate() requires a float as input for position" )
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError("differentiate() requires an int as input for order" )
UpperCamelCase__ : Optional[Any] = Dual(__lowerCAmelCase , 1 )
UpperCamelCase__ : Any = func(__lowerCAmelCase )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[Any]:
return y**2 * y**4
print(differentiate(f, 9, 2))
| 189
| 0
|
def _snake_case( SCREAMING_SNAKE_CASE__ : int = 4000000 ) -> Tuple:
'''simple docstring'''
A__ = [0, 1]
A__ = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
A__ = 0
for j in range(len(A__ ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"""{solution() = }""")
| 351
|
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = VideoToVideoSDPipeline
lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'video'} ) - {'image', 'width', 'height'}
lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'video'} ) - {'image'}
lowerCamelCase = PipelineTesterMixin.required_optional_params - {'latents'}
lowerCamelCase = False
# No `output_type`.
lowerCamelCase = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def snake_case__ ( self : Tuple )-> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4),layers_per_block=2,sample_size=3_2,in_channels=4,out_channels=4,down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'),up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'),cross_attention_dim=3_2,attention_head_dim=4,)
A__ = DDIMScheduler(
beta_start=0.00_085,beta_end=0.012,beta_schedule='scaled_linear',clip_sample=lowercase_,set_alpha_to_one=lowercase_,)
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[3_2, 6_4],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,sample_size=1_2_8,)
torch.manual_seed(0 )
A__ = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=3_2,intermediate_size=3_7,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1_0_0_0,hidden_act='gelu',projection_dim=5_1_2,)
A__ = CLIPTextModel(lowercase_ )
A__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def snake_case__ ( self : Optional[Any],lowercase_ : Optional[int],lowercase_ : List[Any]=0 )-> Any:
'''simple docstring'''
A__ = floats_tensor((1, 3, 3, 3_2, 3_2),rng=random.Random(lowercase_ ) ).to(lowercase_ )
if str(lowercase_ ).startswith('mps' ):
A__ = torch.manual_seed(lowercase_ )
else:
A__ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
A__ = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def snake_case__ ( self : List[Any] )-> List[Any]:
'''simple docstring'''
A__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = VideoToVideoSDPipeline(**lowercase_ )
A__ = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
A__ = self.get_dummy_inputs(lowercase_ )
A__ = 'np'
A__ = sd_pipe(**lowercase_ ).frames
A__ = frames[0][-3:, -3:, -1]
assert frames[0].shape == (3_2, 3_2, 3)
A__ = np.array([1_0_6, 1_1_7, 1_1_3, 1_7_4, 1_3_7, 1_1_2, 1_4_8, 1_5_1, 1_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(),reason='XFormers attention is only available with CUDA and `xformers` installed',)
def snake_case__ ( self : Optional[Any] )-> int:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase_,expected_max_diff=5E-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case__ ( self : Any )-> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case__ ( self : int )-> int:
'''simple docstring'''
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def snake_case__ ( self : List[Any] )-> List[str]:
'''simple docstring'''
pass
def snake_case__ ( self : Optional[int] )-> Optional[Any]:
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class A ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : List[str] )-> Dict:
'''simple docstring'''
A__ = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL',torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
A__ = torch.Generator(device='cpu' ).manual_seed(0 )
A__ = torch.randn((1, 1_0, 3, 1_0_2_4, 5_7_6),generator=lowercase_ )
A__ = video.to('cuda' )
A__ = 'Spiderman is surfing'
A__ = pipe(lowercase_,video=lowercase_,generator=lowercase_,num_inference_steps=3,output_type='pt' ).frames
A__ = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 282
| 0
|
def _a ( SCREAMING_SNAKE_CASE_ : int = 1_00_00_00 ):
__lowerCAmelCase = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , SCREAMING_SNAKE_CASE_ ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
| 92
|
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
__a : Optional[Any] = (KDPMaDiscreteScheduler,)
__a : Dict = 10
def _SCREAMING_SNAKE_CASE ( self , **lowerCAmelCase__ ) -> int:
'''simple docstring'''
__lowercase = {
'''num_train_timesteps''': 11_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**lowerCAmelCase__ )
return config
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(prediction_type='''v_prediction''' )
__lowercase = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowercase = sample.to(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
__lowercase = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = model(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(lowerCAmelCase__ ) )
__lowercase = torch.mean(torch.abs(lowerCAmelCase__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_4286_5017_0972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0002 ) < 1E-3
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
if torch_device == "mps":
return
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowercase = sample.to(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
__lowercase = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = model(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(lowerCAmelCase__ ) )
__lowercase = torch.mean(torch.abs(lowerCAmelCase__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
if torch_device == "mps":
return
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter.to(lowerCAmelCase__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
__lowercase = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = model(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(lowerCAmelCase__ ) )
__lowercase = torch.mean(torch.abs(lowerCAmelCase__ ) )
if str(lowerCAmelCase__ ).startswith('''cpu''' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
| 210
| 0
|
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowercase__ ( lowercase , lowercase ):
lowercase__ = 1
@register_to_config
def __init__( self : Optional[Any] ,lowerCamelCase__ : int = 1000 ,lowerCamelCase__ : Optional[Union[np.ndarray, List[float]]] = None ):
'''simple docstring'''
self.set_timesteps(lowerCamelCase__ )
# standard deviation of the initial noise distribution
_UpperCamelCase : Dict = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
_UpperCamelCase : str = 4
# running values
_UpperCamelCase : List[Any] = []
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : int ,lowerCamelCase__ : Union[str, torch.device] = None ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = num_inference_steps
_UpperCamelCase : Optional[Any] = torch.linspace(1 ,0 ,num_inference_steps + 1 )[:-1]
_UpperCamelCase : Optional[Any] = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
_UpperCamelCase : str = torch.tensor(self.config.trained_betas ,dtype=torch.floataa )
else:
_UpperCamelCase : Union[str, Any] = torch.sin(steps * math.pi / 2 ) ** 2
_UpperCamelCase : List[str] = (1.0 - self.betas**2) ** 0.5
_UpperCamelCase : List[Any] = (torch.atana(self.betas ,self.alphas ) / math.pi * 2)[:-1]
_UpperCamelCase : Optional[Any] = timesteps.to(lowerCamelCase__ )
_UpperCamelCase : Tuple = []
def UpperCamelCase_ ( self : int ,lowerCamelCase__ : torch.FloatTensor ,lowerCamelCase__ : int ,lowerCamelCase__ : torch.FloatTensor ,lowerCamelCase__ : bool = True ,):
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
_UpperCamelCase : int = (self.timesteps == timestep).nonzero().item()
_UpperCamelCase : int = timestep_index + 1
_UpperCamelCase : Any = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(lowerCamelCase__ )
if len(self.ets ) == 1:
_UpperCamelCase : Optional[Any] = self.ets[-1]
elif len(self.ets ) == 2:
_UpperCamelCase : List[str] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
_UpperCamelCase : Optional[Any] = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
_UpperCamelCase : Any = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
_UpperCamelCase : Any = self._get_prev_sample(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase__ )
def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : torch.FloatTensor ,*lowerCamelCase__ : str ,**lowerCamelCase__ : Tuple ):
'''simple docstring'''
return sample
def UpperCamelCase_ ( self : Optional[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : str = self.alphas[timestep_index]
_UpperCamelCase : Dict = self.betas[timestep_index]
_UpperCamelCase : Optional[Any] = self.alphas[prev_timestep_index]
_UpperCamelCase : Tuple = self.betas[prev_timestep_index]
_UpperCamelCase : Optional[Any] = (sample - sigma * ets) / max(lowerCamelCase__ ,1E-8 )
_UpperCamelCase : Dict = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : Tuple ):
'''simple docstring'''
return self.config.num_train_timesteps
| 367
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case_ : Optional[int] = logging.get_logger(__name__)
snake_case_ : List[Any] = {
'facebook/xlm-roberta-xl': 'https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json',
'facebook/xlm-roberta-xxl': 'https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class lowercase__ ( lowercase ):
lowercase__ = """xlm-roberta-xl"""
def __init__( self : Optional[int] ,lowerCamelCase__ : Optional[Any]=250880 ,lowerCamelCase__ : Tuple=2560 ,lowerCamelCase__ : Union[str, Any]=36 ,lowerCamelCase__ : List[str]=32 ,lowerCamelCase__ : Optional[Any]=10240 ,lowerCamelCase__ : Tuple="gelu" ,lowerCamelCase__ : int=0.1 ,lowerCamelCase__ : int=0.1 ,lowerCamelCase__ : Optional[int]=514 ,lowerCamelCase__ : List[str]=1 ,lowerCamelCase__ : Dict=0.0_2 ,lowerCamelCase__ : Any=1E-05 ,lowerCamelCase__ : Union[str, Any]=1 ,lowerCamelCase__ : str=0 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : Union[str, Any]="absolute" ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : List[str]=None ,**lowerCamelCase__ : Dict ,):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,**lowerCamelCase__ )
_UpperCamelCase : Optional[int] = vocab_size
_UpperCamelCase : Optional[Any] = hidden_size
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : Any = hidden_act
_UpperCamelCase : Dict = intermediate_size
_UpperCamelCase : Optional[int] = hidden_dropout_prob
_UpperCamelCase : Any = attention_probs_dropout_prob
_UpperCamelCase : List[Any] = max_position_embeddings
_UpperCamelCase : str = type_vocab_size
_UpperCamelCase : Optional[Any] = initializer_range
_UpperCamelCase : Optional[int] = layer_norm_eps
_UpperCamelCase : Optional[int] = position_embedding_type
_UpperCamelCase : Optional[Any] = use_cache
_UpperCamelCase : Optional[Any] = classifier_dropout
class lowercase__ ( lowercase ):
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
_UpperCamelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCamelCase : Optional[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 236
| 0
|
'''simple docstring'''
import numpy as np
def UpperCAmelCase_ (__a : np.ndarray , __a : np.ndarray , __a : float = 1e-12 , __a : int = 1_0_0 , ):
"""simple docstring"""
assert np.shape(__a )[0] == np.shape(__a )[1]
# Ensure proper dimensionality.
assert np.shape(__a )[0] == np.shape(__a )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(__a ) == np.iscomplexobj(__a )
_a : List[Any] = np.iscomplexobj(__a )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(__a , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
_a : Dict = False
_a : Optional[int] = 0
_a : List[Any] = 0
_a : Optional[Any] = 1e12
while not convergence:
# Multiple matrix by the vector.
_a : Optional[int] = np.dot(__a , __a )
# Normalize the resulting output vector.
_a : Dict = w / np.linalg.norm(__a )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
_a : int = vector.conj().T if is_complex else vector.T
_a : Union[str, Any] = np.dot(__a , np.dot(__a , __a ) )
# Check convergence.
_a : Optional[int] = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
_a : int = True
_a : Optional[Any] = lambda_
if is_complex:
_a : int = np.real(lambda_ )
return lambda_, vector
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Union[str, Any] = np.array([[4_1, 4, 2_0], [4, 2_6, 3_0], [2_0, 3_0, 5_0]] )
_a : Optional[int] = np.array([4_1, 4, 2_0] )
_a : str = real_input_matrix.astype(np.complexaaa )
_a : Union[str, Any] = np.triu(1J * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
_a : Optional[int] = np.array([4_1, 4, 2_0] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
_a : Tuple = real_input_matrix
_a : Union[str, Any] = real_vector
elif problem_type == "complex":
_a : Tuple = complex_input_matrix
_a : Optional[Any] = complex_vector
# Our implementation.
_a, _a : Dict = power_iteration(__a , __a )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
_a, _a : Any = np.linalg.eigh(__a )
# Last eigenvalue is the maximum one.
_a : Any = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
_a : Tuple = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(__a ) - np.abs(__a ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 271
|
'''simple docstring'''
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = inspect.getfile(accelerate.test_utils )
__UpperCAmelCase : List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
__UpperCAmelCase : Dict = ['''accelerate''', '''launch''']
__UpperCAmelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
__UpperCAmelCase : Dict = '''default_config.yaml'''
__UpperCAmelCase : Optional[Any] = config_folder / config_file
__UpperCAmelCase : Dict = config_folder / '''_default_config.yaml'''
__UpperCAmelCase : Any = Path('''tests/test_configs''' )
@classmethod
def __lowercase ( cls : int ):
'''simple docstring'''
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def __lowercase ( cls : List[Any] ):
'''simple docstring'''
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] ,env=os.environ.copy() )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
for config in sorted(self.test_config_path.glob('**/*.yaml' ) ):
with self.subTest(config_file=_a ):
execute_subprocess_async(
self.base_cmd + ['--config_file', str(_a ), self.test_file_path] ,env=os.environ.copy() )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
execute_subprocess_async(['accelerate', 'test'] ,env=os.environ.copy() )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = '''test-tpu'''
__UpperCAmelCase : Any = '''us-central1-a'''
__UpperCAmelCase : List[Any] = '''ls'''
__UpperCAmelCase : Any = ['''accelerate''', '''tpu-config''']
__UpperCAmelCase : Dict = '''cd /usr/share'''
__UpperCAmelCase : Any = '''tests/test_samples/test_command_file.sh'''
__UpperCAmelCase : List[Any] = '''Running gcloud compute tpus tpu-vm ssh'''
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Optional[Any] = run_command(
self.cmd
+ ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] ,return_stdout=_a ,)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" ,_a ,)
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Any = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command',
self.command,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] ,return_stdout=_a ,)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" ,_a ,)
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[int] = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] ,return_stdout=_a )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,_a ,)
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[Any] = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] ,return_stdout=_a ,)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" ,_a ,)
def __lowercase ( self : str ):
'''simple docstring'''
_a : List[str] = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--command',
self.command,
'--command',
'echo "Hello World"',
'--debug',
] ,return_stdout=_a ,)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" ,_a ,)
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Any = run_command(
self.cmd
+ ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] ,return_stdout=_a ,)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,_a ,)
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Union[str, Any] = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command_file',
self.command_file,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] ,return_stdout=_a ,)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,_a ,)
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] ,return_stdout=_a ,)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,_a ,)
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[int] = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--install_accelerate',
'--accelerate_version',
'12.0.0',
'--debug',
] ,return_stdout=_a ,)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,_a ,)
| 271
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class a :
"""simple docstring"""
SCREAMING_SNAKE_CASE : torch.Tensor # [batch_size x 3]
SCREAMING_SNAKE_CASE : torch.Tensor # [batch_size x 3]
SCREAMING_SNAKE_CASE : torch.Tensor # [batch_size x 3]
SCREAMING_SNAKE_CASE : torch.Tensor # [batch_size x 3]
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : float
SCREAMING_SNAKE_CASE : float
SCREAMING_SNAKE_CASE : Tuple[int]
def lowerCamelCase__ ( self : Any ) -> int:
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def lowerCamelCase__ ( self : Any ) -> torch.Tensor:
__UpperCAmelCase : Dict = torch.arange(self.height * self.width )
__UpperCAmelCase : Dict = torch.stack(
[
pixel_indices % self.width,
torch.div(snake_case , self.width , rounding_mode='''trunc''' ),
] , axis=1 , )
return coords
@property
def lowerCamelCase__ ( self : Any ) -> int:
__UpperCAmelCase , *__UpperCAmelCase : str = self.shape
__UpperCAmelCase : Dict = int(np.prod(snake_case ) )
__UpperCAmelCase : Tuple = self.get_image_coords()
__UpperCAmelCase : List[Any] = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
__UpperCAmelCase : Any = self.get_camera_rays(snake_case )
__UpperCAmelCase : List[str] = rays.view(snake_case , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def lowerCamelCase__ ( self : Union[str, Any] , snake_case : torch.Tensor ) -> torch.Tensor:
__UpperCAmelCase , *__UpperCAmelCase , __UpperCAmelCase : List[str] = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
__UpperCAmelCase : List[str] = coords.view(snake_case , -1 , 2 )
__UpperCAmelCase : Optional[Any] = self.resolution()
__UpperCAmelCase : Tuple = self.fov()
__UpperCAmelCase : Optional[int] = (flat.float() / (res - 1)) * 2 - 1
__UpperCAmelCase : Union[str, Any] = fracs * torch.tan(fov / 2 )
__UpperCAmelCase : str = fracs.view(snake_case , -1 , 2 )
__UpperCAmelCase : Any = (
self.z.view(snake_case , 1 , 3 )
+ self.x.view(snake_case , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(snake_case , 1 , 3 ) * fracs[:, :, 1:]
)
__UpperCAmelCase : Union[str, Any] = directions / directions.norm(dim=-1 , keepdim=snake_case )
__UpperCAmelCase : Union[str, Any] = torch.stack(
[
torch.broadcast_to(self.origin.view(snake_case , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(snake_case , *snake_case , 2 , 3 )
def lowerCamelCase__ ( self : Any , snake_case : int , snake_case : int ) -> "DifferentiableProjectiveCamera":
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=snake_case , height=snake_case , x_fov=self.x_fov , y_fov=self.y_fov , )
def _a ( _lowercase : int ):
'''simple docstring'''
__UpperCAmelCase : str = []
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : List[Any] = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
__UpperCAmelCase : Dict = np.array([np.sin(_lowercase ), np.cos(_lowercase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
__UpperCAmelCase : Any = -z * 4
__UpperCAmelCase : Dict = np.array([np.cos(_lowercase ), -np.sin(_lowercase ), 0.0] )
__UpperCAmelCase : List[str] = np.cross(_lowercase , _lowercase )
origins.append(_lowercase )
xs.append(_lowercase )
ys.append(_lowercase )
zs.append(_lowercase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(_lowercase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(_lowercase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(_lowercase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(_lowercase , axis=0 ) ).float() , width=_lowercase , height=_lowercase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(_lowercase )) , )
| 240
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
__UpperCAmelCase :Tuple = "Create a default config file for Accelerate with only a few flags set."
def _a ( _lowercase : List[Any]="no" , _lowercase : str = default_json_config_file , _lowercase : bool = False ):
'''simple docstring'''
__UpperCAmelCase : Dict = Path(_lowercase )
path.parent.mkdir(parents=_lowercase , exist_ok=_lowercase )
if path.exists():
print(
F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
return False
__UpperCAmelCase : List[str] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
__UpperCAmelCase : int = {
'''compute_environment''': '''LOCAL_MACHINE''',
'''mixed_precision''': mixed_precision,
}
if torch.cuda.is_available():
__UpperCAmelCase : Optional[Any] = torch.cuda.device_count()
__UpperCAmelCase : List[str] = num_gpus
__UpperCAmelCase : int = False
if num_gpus > 1:
__UpperCAmelCase : Any = '''MULTI_GPU'''
else:
__UpperCAmelCase : int = '''NO'''
elif is_xpu_available() and use_xpu:
__UpperCAmelCase : List[Any] = torch.xpu.device_count()
__UpperCAmelCase : List[Any] = num_xpus
__UpperCAmelCase : Optional[int] = False
if num_xpus > 1:
__UpperCAmelCase : Any = '''MULTI_XPU'''
else:
__UpperCAmelCase : Optional[Any] = '''NO'''
elif is_npu_available():
__UpperCAmelCase : Dict = torch.npu.device_count()
__UpperCAmelCase : Any = num_npus
__UpperCAmelCase : Any = False
if num_npus > 1:
__UpperCAmelCase : Dict = '''MULTI_NPU'''
else:
__UpperCAmelCase : Optional[int] = '''NO'''
else:
__UpperCAmelCase : List[str] = 0
__UpperCAmelCase : Dict = True
__UpperCAmelCase : Dict = 1
__UpperCAmelCase : Tuple = '''NO'''
__UpperCAmelCase : List[Any] = ClusterConfig(**_lowercase )
config.to_json_file(_lowercase )
return path
def _a ( _lowercase : Union[str, Any] , _lowercase : str ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = parser.add_parser('''default''' , parents=_lowercase , help=_lowercase , formatter_class=_lowercase )
parser.add_argument(
'''--config_file''' , default=_lowercase , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , dest='''save_location''' , )
parser.add_argument(
'''--mixed_precision''' , choices=['''no''', '''fp16''', '''bf16'''] , type=_lowercase , help='''Whether or not to use mixed precision training. '''
'''Choose between FP16 and BF16 (bfloat16) training. '''
'''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' , default='''no''' , )
parser.set_defaults(func=_lowercase )
return parser
def _a ( _lowercase : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : str = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F'accelerate configuration saved at {config_file}' )
| 240
| 1
|
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : str = (EulerDiscreteScheduler,)
__A : Optional[Any] = 1_0
def __lowercase ( self , **lowercase) -> int:
'''simple docstring'''
a__ : str = {
'num_train_timesteps': 1100,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**lowercase)
return config
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase)
def __lowercase ( self) -> int:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02]):
self.check_over_configs(beta_start=lowercase , beta_end=lowercase)
def __lowercase ( self) -> Dict:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowercase)
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase)
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
a__ : List[str] = self.scheduler_classes[0]
a__ : Optional[Any] = self.get_scheduler_config()
a__ : Any = scheduler_class(**lowercase)
scheduler.set_timesteps(self.num_inference_steps)
a__ : Dict = torch.manual_seed(0)
a__ : Dict = self.dummy_model()
a__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
a__ : Tuple = sample.to(lowercase)
for i, t in enumerate(scheduler.timesteps):
a__ : str = scheduler.scale_model_input(lowercase , lowercase)
a__ : List[str] = model(lowercase , lowercase)
a__ : List[str] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase)
a__ : Optional[Any] = output.prev_sample
a__ : List[str] = torch.sum(torch.abs(lowercase))
a__ : List[Any] = torch.mean(torch.abs(lowercase))
assert abs(result_sum.item() - 10.08_07) < 1e-2
assert abs(result_mean.item() - 0.01_31) < 1e-3
def __lowercase ( self) -> Any:
'''simple docstring'''
a__ : int = self.scheduler_classes[0]
a__ : Optional[Any] = self.get_scheduler_config(prediction_type='v_prediction')
a__ : Any = scheduler_class(**lowercase)
scheduler.set_timesteps(self.num_inference_steps)
a__ : Tuple = torch.manual_seed(0)
a__ : Dict = self.dummy_model()
a__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
a__ : str = sample.to(lowercase)
for i, t in enumerate(scheduler.timesteps):
a__ : int = scheduler.scale_model_input(lowercase , lowercase)
a__ : Tuple = model(lowercase , lowercase)
a__ : Union[str, Any] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase)
a__ : Optional[int] = output.prev_sample
a__ : Dict = torch.sum(torch.abs(lowercase))
a__ : int = torch.mean(torch.abs(lowercase))
assert abs(result_sum.item() - 0.00_02) < 1e-2
assert abs(result_mean.item() - 2.2676e-06) < 1e-3
def __lowercase ( self) -> List[str]:
'''simple docstring'''
a__ : Optional[Any] = self.scheduler_classes[0]
a__ : Any = self.get_scheduler_config()
a__ : Optional[int] = scheduler_class(**lowercase)
scheduler.set_timesteps(self.num_inference_steps , device=lowercase)
a__ : Optional[Any] = torch.manual_seed(0)
a__ : Dict = self.dummy_model()
a__ : int = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
a__ : int = sample.to(lowercase)
for t in scheduler.timesteps:
a__ : List[str] = scheduler.scale_model_input(lowercase , lowercase)
a__ : Optional[int] = model(lowercase , lowercase)
a__ : List[Any] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase)
a__ : Optional[Any] = output.prev_sample
a__ : Tuple = torch.sum(torch.abs(lowercase))
a__ : Union[str, Any] = torch.mean(torch.abs(lowercase))
assert abs(result_sum.item() - 10.08_07) < 1e-2
assert abs(result_mean.item() - 0.01_31) < 1e-3
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ : Optional[int] = self.scheduler_classes[0]
a__ : int = self.get_scheduler_config()
a__ : Optional[int] = scheduler_class(**lowercase , use_karras_sigmas=lowercase)
scheduler.set_timesteps(self.num_inference_steps , device=lowercase)
a__ : Tuple = torch.manual_seed(0)
a__ : int = self.dummy_model()
a__ : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
a__ : Tuple = sample.to(lowercase)
for t in scheduler.timesteps:
a__ : Optional[int] = scheduler.scale_model_input(lowercase , lowercase)
a__ : Tuple = model(lowercase , lowercase)
a__ : Optional[int] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase)
a__ : Tuple = output.prev_sample
a__ : List[Any] = torch.sum(torch.abs(lowercase))
a__ : Any = torch.mean(torch.abs(lowercase))
assert abs(result_sum.item() - 1_24.52_29_94_99_51_17_19) < 1e-2
assert abs(result_mean.item() - 0.1_62_13_93_26_33_39_99_63) < 1e-3
| 99
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A__ ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
__A : Optional[int] = ShapEImgaImgPipeline
__A : Tuple = ['''image''']
__A : Any = ['''image''']
__A : Optional[Any] = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
__A : Dict = False
@property
def __lowercase ( self) -> Any:
'''simple docstring'''
return 32
@property
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
return 32
@property
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
return 8
@property
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0)
a__ : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
a__ : Dict = CLIPVisionModel(lowercase)
return model
@property
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
a__ : str = CLIPImageProcessor(
crop_size=224 , do_center_crop=lowercase , do_normalize=lowercase , do_resize=lowercase , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
@property
def __lowercase ( self) -> str:
'''simple docstring'''
torch.manual_seed(0)
a__ : str = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
a__ : Any = PriorTransformer(**lowercase)
return model
@property
def __lowercase ( self) -> Any:
'''simple docstring'''
torch.manual_seed(0)
a__ : List[Any] = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
a__ : List[str] = ShapERenderer(**lowercase)
return model
def __lowercase ( self) -> str:
'''simple docstring'''
a__ : Dict = self.dummy_prior
a__ : List[str] = self.dummy_image_encoder
a__ : int = self.dummy_image_processor
a__ : str = self.dummy_renderer
a__ : Optional[int] = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=lowercase , clip_sample=lowercase , clip_sample_range=1.0 , )
a__ : List[Any] = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __lowercase ( self , lowercase , lowercase=0) -> List[str]:
'''simple docstring'''
a__ : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase)).to(lowercase)
if str(lowercase).startswith('mps'):
a__ : List[str] = torch.manual_seed(lowercase)
else:
a__ : str = torch.Generator(device=lowercase).manual_seed(lowercase)
a__ : Tuple = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def __lowercase ( self) -> Any:
'''simple docstring'''
a__ : int = 'cpu'
a__ : List[str] = self.get_dummy_components()
a__ : Dict = self.pipeline_class(**lowercase)
a__ : Optional[int] = pipe.to(lowercase)
pipe.set_progress_bar_config(disable=lowercase)
a__ : Tuple = pipe(**self.get_dummy_inputs(lowercase))
a__ : Any = output.images[0]
a__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
a__ : List[str] = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __lowercase ( self) -> Any:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2])
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ : str = torch_device == 'cpu'
a__ : Tuple = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowercase , relax_max_difference=lowercase , )
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : List[str] = self.get_dummy_components()
a__ : str = self.pipeline_class(**lowercase)
a__ : List[str] = pipe.to(lowercase)
pipe.set_progress_bar_config(disable=lowercase)
a__ : Optional[Any] = 1
a__ : List[str] = 2
a__ : Optional[Any] = self.get_dummy_inputs(lowercase)
for key in inputs.keys():
if key in self.batch_params:
a__ : Any = batch_size * [inputs[key]]
a__ : int = pipe(**lowercase , num_images_per_prompt=lowercase)[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png')
a__ : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy')
a__ : List[str] = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img')
a__ : Tuple = pipe.to(lowercase)
pipe.set_progress_bar_config(disable=lowercase)
a__ : List[Any] = torch.Generator(device=lowercase).manual_seed(0)
a__ : Optional[int] = pipe(
lowercase , generator=lowercase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowercase , lowercase)
| 99
| 1
|
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class _a ( _a ):
_lowercase : List[str] = (DPMSolverSDEScheduler,)
_lowercase : List[str] = 10
def lowerCamelCase_ ( self: List[str] , **UpperCamelCase_: Dict ) -> Tuple:
"""simple docstring"""
lowercase__ = {
"num_train_timesteps": 1_100,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"noise_sampler_seed": 0,
}
config.update(**_a )
return config
def lowerCamelCase_ ( self: List[Any] ) -> Dict:
"""simple docstring"""
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_a )
def lowerCamelCase_ ( self: List[str] ) -> int:
"""simple docstring"""
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def lowerCamelCase_ ( self: Tuple ) -> Optional[int]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_a )
def lowerCamelCase_ ( self: Optional[Any] ) -> Tuple:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
lowercase__ = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = scheduler.scale_model_input(_a , _a )
lowercase__ = model(_a , _a )
lowercase__ = scheduler.step(_a , _a , _a )
lowercase__ = output.prev_sample
lowercase__ = torch.sum(torch.abs(_a ) )
lowercase__ = torch.mean(torch.abs(_a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_821_044_921_875 ) < 1E-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_352_111_816_406 ) < 1E-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
def lowerCamelCase_ ( self: int ) -> Any:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowercase__ = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
lowercase__ = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = scheduler.scale_model_input(_a , _a )
lowercase__ = model(_a , _a )
lowercase__ = scheduler.step(_a , _a , _a )
lowercase__ = output.prev_sample
lowercase__ = torch.sum(torch.abs(_a ) )
lowercase__ = torch.mean(torch.abs(_a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_149_200_439_453 ) < 1E-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_663_360_595_703 ) < 1E-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_487_548_828_125 ) < 1E-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1E-3
def lowerCamelCase_ ( self: str ) -> Any:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowercase__ = scheduler.scale_model_input(_a , _a )
lowercase__ = model(_a , _a )
lowercase__ = scheduler.step(_a , _a , _a )
lowercase__ = output.prev_sample
lowercase__ = torch.sum(torch.abs(_a ) )
lowercase__ = torch.mean(torch.abs(_a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_957_397_460_938 ) < 1E-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_353_637_695_312 ) < 1E-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_a , use_karras_sigmas=_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma
lowercase__ = sample.to(_a )
for t in scheduler.timesteps:
lowercase__ = scheduler.scale_model_input(_a , _a )
lowercase__ = model(_a , _a )
lowercase__ = scheduler.step(_a , _a , _a )
lowercase__ = output.prev_sample
lowercase__ = torch.sum(torch.abs(_a ) )
lowercase__ = torch.mean(torch.abs(_a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_974_135_742_188 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_653_564_453_125 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_135_223_388_672 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
| 370
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: Dict ) -> Any:
"""simple docstring"""
lowercase__ = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
lowercase__ = get_activation('''gelu''' )
self.assertTrue(torch.allclose(gelu_python(UpperCamelCase_ ) , torch_builtin(UpperCamelCase_ ) ) )
self.assertFalse(torch.allclose(gelu_python(UpperCamelCase_ ) , gelu_new(UpperCamelCase_ ) ) )
def lowerCamelCase_ ( self: Optional[int] ) -> str:
"""simple docstring"""
lowercase__ = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
lowercase__ = get_activation('''gelu''' )
lowercase__ = get_activation('''gelu_10''' )
lowercase__ = torch_builtin(UpperCamelCase_ )
lowercase__ = geluaa(UpperCamelCase_ )
lowercase__ = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(UpperCamelCase_ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Tuple:
"""simple docstring"""
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
with self.assertRaises(UpperCamelCase_ ):
get_activation('''bogus''' )
with self.assertRaises(UpperCamelCase_ ):
get_activation(UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
lowercase__ = get_activation('''gelu''' )
lowercase__ = 1
lowercase__ = get_activation('''gelu''' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(UpperCamelCase_ ):
lowercase__ = acta.a
| 93
| 0
|
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
__snake_case : Optional[int] = True
except (ImportError, AttributeError):
__snake_case : Union[str, Any] = object
def __lowerCamelCase ( *__snake_case : str, **__snake_case : str ) -> Any:
"""simple docstring"""
pass
__snake_case : List[str] = False
__snake_case : Tuple = logging.get_logger('transformers-cli/serving')
def __lowerCamelCase ( __snake_case : Namespace ) -> Optional[Any]:
"""simple docstring"""
A__ : Optional[int] =pipeline(
task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
return ServeCommand(__snake_case, args.host, args.port, args.workers )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 42
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 42
__snake_case = 42
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 42
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 42
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
@staticmethod
def lowercase__ ( lowerCAmelCase_ : ArgumentParser ) -> Dict:
'''simple docstring'''
A__ : Optional[Any] =parser.add_parser(
"""serve""" , help="""CLI tool to run inference requests through REST and GraphQL endpoints.""" )
serve_parser.add_argument(
"""--task""" , type=lowerCAmelCase_ , choices=get_supported_tasks() , help="""The task to run the pipeline on""" , )
serve_parser.add_argument("""--host""" , type=lowerCAmelCase_ , default="""localhost""" , help="""Interface the server will listen on.""" )
serve_parser.add_argument("""--port""" , type=lowerCAmelCase_ , default=88_88 , help="""Port the serving will listen to.""" )
serve_parser.add_argument("""--workers""" , type=lowerCAmelCase_ , default=1 , help="""Number of http workers""" )
serve_parser.add_argument("""--model""" , type=lowerCAmelCase_ , help="""Model's name or path to stored model.""" )
serve_parser.add_argument("""--config""" , type=lowerCAmelCase_ , help="""Model's config name or path to stored model.""" )
serve_parser.add_argument("""--tokenizer""" , type=lowerCAmelCase_ , help="""Tokenizer name to use.""" )
serve_parser.add_argument(
"""--device""" , type=lowerCAmelCase_ , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
serve_parser.set_defaults(func=lowerCAmelCase_ )
def __init__( self : List[Any] , lowerCAmelCase_ : Pipeline , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> List[str]:
'''simple docstring'''
A__ : Any =pipeline
A__ : Tuple =host
A__ : Any =port
A__ : str =workers
if not _serve_dependencies_installed:
raise RuntimeError(
"""Using serve command requires FastAPI and uvicorn. """
"""Please install transformers with [serving]: pip install \"transformers[serving]\"."""
"""Or install FastAPI and uvicorn separately.""" )
else:
logger.info(f"Serving model over {host}:{port}" )
A__ : int =FastAPI(
routes=[
APIRoute(
"""/""" , self.model_info , response_model=lowerCAmelCase_ , response_class=lowerCAmelCase_ , methods=["""GET"""] , ),
APIRoute(
"""/tokenize""" , self.tokenize , response_model=lowerCAmelCase_ , response_class=lowerCAmelCase_ , methods=["""POST"""] , ),
APIRoute(
"""/detokenize""" , self.detokenize , response_model=lowerCAmelCase_ , response_class=lowerCAmelCase_ , methods=["""POST"""] , ),
APIRoute(
"""/forward""" , self.forward , response_model=lowerCAmelCase_ , response_class=lowerCAmelCase_ , methods=["""POST"""] , ),
] , timeout=6_00 , )
def lowercase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
run(self._app , host=self.host , port=self.port , workers=self.workers )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : str = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) , lowerCAmelCase_ : bool = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) ) -> int:
'''simple docstring'''
try:
A__ : Any =self._pipeline.tokenizer.tokenize(lowerCAmelCase_ )
if return_ids:
A__ : str =self._pipeline.tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
return ServeTokenizeResult(tokens=lowerCAmelCase_ , tokens_ids=lowerCAmelCase_ )
else:
return ServeTokenizeResult(tokens=lowerCAmelCase_ )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={"""model""": """""", """error""": str(lowerCAmelCase_ )} )
def lowercase__ ( self : Dict , lowerCAmelCase_ : List[int] = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) , lowerCAmelCase_ : bool = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) , lowerCAmelCase_ : bool = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) , ) -> Optional[int]:
'''simple docstring'''
try:
A__ : int =self._pipeline.tokenizer.decode(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return ServeDeTokenizeResult(model="""""" , text=lowerCAmelCase_ )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={"""model""": """""", """error""": str(lowerCAmelCase_ )} )
async def lowercase__ ( self : List[Any] , lowerCAmelCase_ : int=Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) ) -> Any:
'''simple docstring'''
# Check we don't have empty string
if len(lowerCAmelCase_ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
A__ : Tuple =self._pipeline(lowerCAmelCase_ )
return ServeForwardResult(output=lowerCAmelCase_ )
except Exception as e:
raise HTTPException(5_00 , {"""error""": str(lowerCAmelCase_ )} )
| 134
|
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( __snake_case : List[str], __snake_case : Union[str, Any], __snake_case : Dict ) -> Dict:
"""simple docstring"""
return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def __lowerCamelCase ( __snake_case : str, __snake_case : int, __snake_case : Dict, __snake_case : int="attention" ) -> str:
"""simple docstring"""
A__ : Union[str, Any] =np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :] )
A__ : str =k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2] )
A__ : List[Any] =np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :] )
A__ : Optional[int] =o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2] )
A__ : Dict =np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :] )
A__ : Dict =q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2] )
A__ : Union[str, Any] =np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :] )
A__ : List[str] =v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def __lowerCamelCase ( __snake_case : Dict, __snake_case : Any, __snake_case : Tuple, __snake_case : Optional[Any]=False ) -> Any:
"""simple docstring"""
if split_mlp_wi:
A__ : Any =params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
A__ : int =params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
A__ : Optional[Any] =(wi_a, wi_a)
else:
A__ : Optional[int] =params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
A__ : int =params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
return wi, wo
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : str, __snake_case : Any, __snake_case : int ) -> List[Any]:
"""simple docstring"""
return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def __lowerCamelCase ( __snake_case : dict, *, __snake_case : int, __snake_case : bool, __snake_case : bool = False ) -> Union[str, Any]:
"""simple docstring"""
A__ : Optional[int] =traverse_util.flatten_dict(variables["""target"""] )
A__ : int ={"""/""".join(__snake_case ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
A__ : List[Any] ="""encoder/encoder/mlp/wi_0/kernel""" in old
print("""Split MLP:""", __snake_case )
A__ : Optional[int] =collections.OrderedDict()
# Shared embeddings.
A__ : List[Any] =old["""token_embedder/embedding"""]
# Encoder.
for i in range(__snake_case ):
# Block i, layer 0 (Self Attention).
A__ : Optional[Any] =tax_layer_norm_lookup(__snake_case, __snake_case, """encoder""", """pre_attention_layer_norm""" )
A__ , A__ , A__ , A__ : Optional[int] =tax_attention_lookup(__snake_case, __snake_case, """encoder""", """attention""" )
A__ : List[str] =layer_norm
A__ : Dict =k.T
A__ : Optional[int] =o.T
A__ : str =q.T
A__ : Any =v.T
# Block i, layer 1 (MLP).
A__ : List[Any] =tax_layer_norm_lookup(__snake_case, __snake_case, """encoder""", """pre_mlp_layer_norm""" )
A__ , A__ : int =tax_mlp_lookup(__snake_case, __snake_case, """encoder""", __snake_case )
A__ : Optional[int] =layer_norm
if split_mlp_wi:
A__ : List[str] =wi[0].T
A__ : List[str] =wi[1].T
else:
A__ : Optional[int] =wi.T
A__ : Optional[Any] =wo.T
if scalable_attention:
# convert the rel_embedding of each layer
A__ : int =tax_relpos_bias_lookup(
__snake_case, __snake_case, """encoder""" ).T
A__ : Optional[int] =old["""encoder/encoder_norm/scale"""]
if not scalable_attention:
A__ : List[Any] =tax_relpos_bias_lookup(
__snake_case, 0, """encoder""" ).T
A__ : Tuple =tax_relpos_bias_lookup(
__snake_case, 0, """decoder""" ).T
if not is_encoder_only:
# Decoder.
for i in range(__snake_case ):
# Block i, layer 0 (Self Attention).
A__ : List[str] =tax_layer_norm_lookup(__snake_case, __snake_case, """decoder""", """pre_self_attention_layer_norm""" )
A__ , A__ , A__ , A__ : List[str] =tax_attention_lookup(__snake_case, __snake_case, """decoder""", """self_attention""" )
A__ : str =layer_norm
A__ : List[str] =k.T
A__ : int =o.T
A__ : Tuple =q.T
A__ : Optional[Any] =v.T
# Block i, layer 1 (Cross Attention).
A__ : int =tax_layer_norm_lookup(__snake_case, __snake_case, """decoder""", """pre_cross_attention_layer_norm""" )
A__ , A__ , A__ , A__ : Optional[Any] =tax_attention_lookup(__snake_case, __snake_case, """decoder""", """encoder_decoder_attention""" )
A__ : str =layer_norm
A__ : Union[str, Any] =k.T
A__ : str =o.T
A__ : Any =q.T
A__ : str =v.T
# Block i, layer 2 (MLP).
A__ : str =tax_layer_norm_lookup(__snake_case, __snake_case, """decoder""", """pre_mlp_layer_norm""" )
A__ , A__ : Optional[int] =tax_mlp_lookup(__snake_case, __snake_case, """decoder""", __snake_case )
A__ : Dict =layer_norm
if split_mlp_wi:
A__ : List[Any] =wi[0].T
A__ : Union[str, Any] =wi[1].T
else:
A__ : Optional[int] =wi.T
A__ : str =wo.T
if scalable_attention:
# convert the rel_embedding of each layer
A__ : str =tax_relpos_bias_lookup(__snake_case, __snake_case, """decoder""" ).T
A__ : str =old["""decoder/decoder_norm/scale"""]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
A__ : Tuple =old["""decoder/logits_dense/kernel"""].T
return new
def __lowerCamelCase ( __snake_case : Dict, __snake_case : bool ) -> Optional[Any]:
"""simple docstring"""
A__ : Any =collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
A__ : Union[str, Any] =state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
A__ : List[str] =state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
A__ : Optional[Any] =state_dict["""shared.weight"""]
return state_dict
def __lowerCamelCase ( __snake_case : str, __snake_case : str, __snake_case : Optional[Any], __snake_case : int, __snake_case : Optional[int] ) -> Optional[int]:
"""simple docstring"""
A__ : str =checkpoints.load_tax_checkpoint(__snake_case )
A__ : Optional[Any] =convert_tax_to_pytorch(
__snake_case, num_layers=config.num_layers, is_encoder_only=__snake_case, scalable_attention=__snake_case )
A__ : str =make_state_dict(__snake_case, __snake_case )
model.load_state_dict(__snake_case, strict=__snake_case )
def __lowerCamelCase ( __snake_case : Optional[int], __snake_case : Dict, __snake_case : Optional[int], __snake_case : bool = False, __snake_case : bool = False, ) -> Dict:
"""simple docstring"""
A__ : Tuple =MTaConfig.from_json_file(__snake_case )
print(f"Building PyTorch model from configuration: {config}" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
A__ : List[Any] =UMTaEncoderModel(__snake_case )
else:
A__ : int =UMTaForConditionalGeneration(__snake_case )
# Load weights from tf checkpoint
load_tax_weights_in_ta(__snake_case, __snake_case, __snake_case, __snake_case, __snake_case )
# Save pytorch-model
print(f"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(__snake_case )
# Verify that we can load the checkpoint.
model.from_pretrained(__snake_case )
print("""Done""" )
if __name__ == "__main__":
__snake_case : str = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
parser.add_argument(
'--scalable_attention',
action='store_true',
help='Whether the model uses scaled attention (umt5 model)',
default=False,
)
__snake_case : Optional[Any] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 134
| 1
|
"""simple docstring"""
UpperCAmelCase: List[str] = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 336
|
"""simple docstring"""
import argparse
from collections import defaultdict
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : str = F"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : Any = f.readlines()
_lowercase : Optional[int] = F"""class {class_name}("""
_lowercase : List[str] = F"""{4 * " "}def {test_name}("""
_lowercase : List[Any] = F"""{8 * " "}{correct_line.split()[0]}"""
_lowercase : int = F"""{16 * " "}{correct_line.split()[0]}"""
_lowercase : str = False
_lowercase : Optional[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = False
_lowercase : int = 0
_lowercase : Tuple = 0
_lowercase : Union[str, Any] = []
for line in lines:
if line.startswith(__UpperCAmelCase ):
_lowercase : List[str] = True
elif in_class and line.startswith(__UpperCAmelCase ):
_lowercase : str = True
elif in_class and in_func and (line.startswith(__UpperCAmelCase ) or line.startswith(__UpperCAmelCase )):
_lowercase : Union[str, Any] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : Optional[int] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Optional[Any] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"""{spaces * " "}{correct_line}""" )
_lowercase : Union[str, Any] = False
else:
new_lines.append(__UpperCAmelCase )
with open(__UpperCAmelCase , """w""" ) as f:
for line in new_lines:
f.write(__UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=None ):
if fail is not None:
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : Dict = {l.strip() for l in f.readlines()}
else:
_lowercase : int = None
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : int = f.readlines()
_lowercase : int = defaultdict(__UpperCAmelCase )
for line in correct_lines:
_lowercase , _lowercase , _lowercase , _lowercase : int = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase: List[Any] = argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
UpperCAmelCase: Any = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 336
| 1
|
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
lowerCamelCase__ = """src/diffusers"""
# Matches is_xxx_available()
lowerCamelCase__ = re.compile(R"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
lowerCamelCase__ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
lowerCamelCase__ = """
{0} = None
"""
lowerCamelCase__ = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
lowerCamelCase__ = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : List[str] = _re_backend.findall(_UpperCamelCase )
if len(_UpperCamelCase ) == 0:
return None
return "_and_".join(_UpperCamelCase )
def __lowerCAmelCase ():
with open(os.path.join(_UpperCamelCase , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
__lowerCAmelCase : Union[str, Any] = f.readlines()
# Get to the point we do the actual imports for type checking
__lowerCAmelCase : List[Any] = 0
__lowerCAmelCase : Dict = {}
# Go through the end of the file
while line_index < len(_UpperCamelCase ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
__lowerCAmelCase : Tuple = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('else:' ):
line_index += 1
line_index += 1
__lowerCAmelCase : Any = []
# Until we unindent, add backend objects to the list
while line_index < len(_UpperCamelCase ) and len(lines[line_index] ) > 1:
__lowerCAmelCase : Optional[int] = lines[line_index]
__lowerCAmelCase : Optional[int] = _re_single_line_import.search(_UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(_UpperCamelCase ) > 0:
__lowerCAmelCase : List[Any] = objects
else:
line_index += 1
return backend_specific_objects
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
if name.isupper():
return DUMMY_CONSTANT.format(_UpperCamelCase )
elif name.islower():
return DUMMY_FUNCTION.format(_UpperCamelCase , _UpperCamelCase )
else:
return DUMMY_CLASS.format(_UpperCamelCase , _UpperCamelCase )
def __lowerCAmelCase (_UpperCamelCase=None ):
if backend_specific_objects is None:
__lowerCAmelCase : Dict = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
__lowerCAmelCase : Any = {}
for backend, objects in backend_specific_objects.items():
__lowerCAmelCase : List[Any] = '[' + ', '.join(F"\"{b}\"" for b in backend.split('_and_' ) ) + ']'
__lowerCAmelCase : Optional[int] = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(_UpperCamelCase , _UpperCamelCase ) for o in objects] )
__lowerCAmelCase : Optional[Any] = dummy_file
return dummy_files
def __lowerCAmelCase (_UpperCamelCase=False ):
__lowerCAmelCase : str = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
__lowerCAmelCase : Optional[Any] = {'torch': 'pt'}
# Locate actual dummy modules and read their content.
__lowerCAmelCase : Dict = os.path.join(_UpperCamelCase , 'utils' )
__lowerCAmelCase : Dict = {
backend: os.path.join(_UpperCamelCase , F"dummy_{short_names.get(_UpperCamelCase , _UpperCamelCase )}_objects.py" )
for backend in dummy_files.keys()
}
__lowerCAmelCase : str = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(_UpperCamelCase ):
with open(_UpperCamelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
__lowerCAmelCase : Union[str, Any] = f.read()
else:
__lowerCAmelCase : List[str] = ''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F"Updating diffusers.utils.dummy_{short_names.get(_UpperCamelCase , _UpperCamelCase )}_objects.py as the main "
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
F"diffusers.utils.dummy_{short_names.get(_UpperCamelCase , _UpperCamelCase )}_objects.py. Run `make fix-copies` "
'to fix this.' )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowerCamelCase__ = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 86
|
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Union[str, Any] = BarthezTokenizer
A_ : Tuple = BarthezTokenizerFast
A_ : Dict = True
A_ : List[str] = True
def __lowerCamelCase ( self ):
super().setUp()
__lowerCAmelCase : str = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = tokenizer
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = '<pad>'
__lowerCAmelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 10_11_22 )
def __lowerCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 )
@require_torch
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__lowerCAmelCase : Optional[Any] = [0, 57, 30_18, 7_03_07, 91, 2]
__lowerCAmelCase : Optional[int] = self.tokenizer(
_SCREAMING_SNAKE_CASE , max_length=len(_SCREAMING_SNAKE_CASE ) , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__lowerCAmelCase : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
__lowerCAmelCase : Tuple = self.get_tokenizer()
__lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
__lowerCAmelCase : List[str] = 'I was born in 92000, and this is falsé.'
__lowerCAmelCase : Optional[int] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
__lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# fmt: off
__lowerCAmelCase : str = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__lowerCAmelCase : Union[str, Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=_SCREAMING_SNAKE_CASE , )
| 86
| 1
|
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__UpperCAmelCase = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
class __a ( tr.AbstractTransform ):
def __init__( self : Dict , UpperCAmelCase : str = " " ):
lowerCAmelCase_ : Tuple = sentence_delimiter
def A ( self : Any , UpperCAmelCase : str ):
return list(UpperCAmelCase )
def A ( self : List[str] , UpperCAmelCase : List[str] ):
lowerCAmelCase_ : Optional[int] = []
for sent_idx, sentence in enumerate(UpperCAmelCase ):
chars.extend(self.process_string(UpperCAmelCase ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(UpperCAmelCase ) - 1:
chars.append(self.sentence_delimiter )
return chars
__UpperCAmelCase = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
__UpperCAmelCase = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
__UpperCAmelCase = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__UpperCAmelCase = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
__UpperCAmelCase = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __a ( datasets.Metric ):
def A ( self : Optional[int] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
"""https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""",
] , )
def A ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any]=False ):
if concatenate_texts:
return jiwer.compute_measures(
UpperCAmelCase , UpperCAmelCase , truth_transform=UpperCAmelCase , hypothesis_transform=UpperCAmelCase , )["wer"]
lowerCAmelCase_ : int = 0
lowerCAmelCase_ : Dict = 0
for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = jiwer.compute_measures(
UpperCAmelCase , UpperCAmelCase , truth_transform=UpperCAmelCase , hypothesis_transform=UpperCAmelCase , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 28
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __a :
def __init__( self : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple=13 , UpperCAmelCase : Any=64 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : Any=3 , UpperCAmelCase : Any=True , UpperCAmelCase : str=True , UpperCAmelCase : str=32 , UpperCAmelCase : str=5 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : Dict=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : str=10 , UpperCAmelCase : Optional[Any]=0.02 , UpperCAmelCase : Optional[Any]=[1, 16, 4, 4] , UpperCAmelCase : Union[str, Any]=None , ):
lowerCAmelCase_ : Any = parent
lowerCAmelCase_ : str = batch_size
lowerCAmelCase_ : int = image_size
lowerCAmelCase_ : Tuple = patch_size
lowerCAmelCase_ : Union[str, Any] = num_channels
lowerCAmelCase_ : List[str] = is_training
lowerCAmelCase_ : List[str] = use_labels
lowerCAmelCase_ : str = hidden_size
lowerCAmelCase_ : Union[str, Any] = num_hidden_layers
lowerCAmelCase_ : Union[str, Any] = num_attention_heads
lowerCAmelCase_ : Any = intermediate_size
lowerCAmelCase_ : Dict = hidden_act
lowerCAmelCase_ : Dict = hidden_dropout_prob
lowerCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase_ : Optional[Any] = type_sequence_label_size
lowerCAmelCase_ : Optional[int] = initializer_range
lowerCAmelCase_ : int = scope
lowerCAmelCase_ : Tuple = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
lowerCAmelCase_ : int = (self.image_size // 32) ** 2
lowerCAmelCase_ : Dict = num_patches + 1
def A ( self : Any ):
lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : Optional[int] = None
if self.use_labels:
lowerCAmelCase_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def A ( self : Optional[Any] ):
lowerCAmelCase_ : List[Any] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [4, 8, 16, 32],
"""num_groups""": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=UpperCAmelCase , )
def A ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] ):
lowerCAmelCase_ : Tuple = ViTHybridModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : List[str] = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any ):
lowerCAmelCase_ : Tuple = self.type_sequence_label_size
lowerCAmelCase_ : Tuple = ViTHybridForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : int = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A ( self : str ):
lowerCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Tuple = config_and_inputs
lowerCAmelCase_ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
__snake_case : List[str] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
__snake_case : Dict = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
__snake_case : int = False
__snake_case : Tuple = False
__snake_case : Tuple = False
def A ( self : int ):
lowerCAmelCase_ : Union[str, Any] = ViTHybridModelTester(self )
lowerCAmelCase_ : str = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 )
def A ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def A ( self : Dict ):
pass
def A ( self : Dict ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Any = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase_ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def A ( self : List[str] ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : str = model_class(UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : List[str] = [*signature.parameters.keys()]
lowerCAmelCase_ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A ( self : str ):
lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A ( self : str ):
lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
def A ( self : Dict ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Union[str, Any] = _config_zero_init(UpperCAmelCase )
for model_class in self.all_model_classes:
lowerCAmelCase_ : Any = model_class(config=UpperCAmelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
lowerCAmelCase_ : Tuple = [F'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def A ( self : int ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Union[str, Any] = ViTHybridModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def __UpperCamelCase ( ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __a ( unittest.TestCase ):
@cached_property
def A ( self : int ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A ( self : Tuple ):
lowerCAmelCase_ : Union[str, Any] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
UpperCAmelCase )
lowerCAmelCase_ : Tuple = self.default_image_processor
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : Optional[int] = image_processor(images=UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase_ : Any = model(**UpperCAmelCase )
# verify the logits
lowerCAmelCase_ : Any = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
@slow
@require_accelerate
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Tuple = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" )
lowerCAmelCase_ : Optional[Any] = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" , device_map="""auto""" )
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : List[str] = image_processor(images=UpperCAmelCase , return_tensors="""pt""" )
lowerCAmelCase_ : Optional[Any] = model(**UpperCAmelCase )
lowerCAmelCase_ : List[str] = outputs.logits
# model predicts one of the 1000 ImageNet classes
lowerCAmelCase_ : List[str] = logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx] , """tabby, tabby cat""" )
| 28
| 1
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowercase_ )
class a__ ( lowercase_ ):
__lowerCAmelCase = field(default="""automatic-speech-recognition""", metadata={"""include_in_asdict_even_if_is_default""": True} )
__lowerCAmelCase = Features({"""audio""": Audio()} )
__lowerCAmelCase = Features({"""transcription""": Value("""string""" )} )
__lowerCAmelCase = """audio"""
__lowerCAmelCase = """transcription"""
def __magic_name__ ( self , _a ):
if self.audio_column not in features:
raise ValueError(f"""Column {self.audio_column} is not present in features.""" )
if not isinstance(features[self.audio_column] , __UpperCamelCase ):
raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" )
lowercase : int = copy.deepcopy(self )
lowercase : List[str] = self.input_schema.copy()
lowercase : List[str] = features[self.audio_column]
lowercase : List[str] = input_schema
return task_template
@property
def __magic_name__ ( self ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 202
|
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
class UpperCamelCase ( enum.Enum ):
lowercase = 0
lowercase = 1
@add_end_docstrings(lowercase_ )
class UpperCamelCase ( lowercase_ ):
lowercase = 'generated'
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Tuple:
'''simple docstring'''
super().__init__(*__UpperCamelCase ,**__UpperCamelCase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def _UpperCAmelCase ( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,**__UpperCamelCase ,) -> Optional[Any]:
'''simple docstring'''
lowercase_ : List[Any] = {}
if truncation is not None:
lowercase_ : int = truncation
lowercase_ : Dict = generate_kwargs
lowercase_ : List[Any] = {}
if return_tensors is not None and return_type is None:
lowercase_ : Union[str, Any] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
lowercase_ : str = return_type
if clean_up_tokenization_spaces is not None:
lowercase_ : Dict = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase_ : Union[str, Any] = self.tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase )
if len(__UpperCamelCase ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
lowercase_ : Optional[int] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
return True
def _UpperCAmelCase ( self ,*__UpperCamelCase ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : Dict = self.model.config.prefix if self.model.config.prefix is not None else ''
if isinstance(args[0] ,__UpperCamelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input' )
lowercase_ : str = ([prefix + arg for arg in args[0]],)
lowercase_ : Union[str, Any] = True
elif isinstance(args[0] ,__UpperCamelCase ):
lowercase_ : Union[str, Any] = (prefix + args[0],)
lowercase_ : Union[str, Any] = False
else:
raise ValueError(
f''' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`''' )
lowercase_ : List[Any] = self.tokenizer(*__UpperCamelCase ,padding=__UpperCamelCase ,truncation=__UpperCamelCase ,return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : Optional[int] = super().__call__(*__UpperCamelCase ,**__UpperCamelCase )
if (
isinstance(args[0] ,__UpperCamelCase )
and all(isinstance(__UpperCamelCase ,__UpperCamelCase ) for el in args[0] )
and all(len(__UpperCamelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=TruncationStrategy.DO_NOT_TRUNCATE ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
lowercase_ : Any = self._parse_and_tokenize(__UpperCamelCase ,truncation=__UpperCamelCase ,**__UpperCamelCase )
return inputs
def _UpperCAmelCase ( self ,__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
if self.framework == "pt":
lowercase_ , lowercase_ : Optional[int] = model_inputs['input_ids'].shape
elif self.framework == "tf":
lowercase_ , lowercase_ : Union[str, Any] = tf.shape(model_inputs['input_ids'] ).numpy()
lowercase_ : str = generate_kwargs.get('min_length' ,self.model.config.min_length )
lowercase_ : List[Any] = generate_kwargs.get('max_length' ,self.model.config.max_length )
self.check_inputs(__UpperCamelCase ,generate_kwargs['min_length'] ,generate_kwargs['max_length'] )
lowercase_ : Tuple = self.model.generate(**__UpperCamelCase ,**__UpperCamelCase )
lowercase_ : str = output_ids.shape[0]
if self.framework == "pt":
lowercase_ : List[Any] = output_ids.reshape(__UpperCamelCase ,out_b // in_b ,*output_ids.shape[1:] )
elif self.framework == "tf":
lowercase_ : List[Any] = tf.reshape(__UpperCamelCase ,(in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=ReturnType.TEXT ,__UpperCamelCase=False ) -> Dict:
'''simple docstring'''
lowercase_ : Dict = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
lowercase_ : List[Any] = {f'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
lowercase_ : str = {
f'''{self.return_name}_text''': self.tokenizer.decode(
__UpperCamelCase ,skip_special_tokens=__UpperCamelCase ,clean_up_tokenization_spaces=__UpperCamelCase ,)
}
records.append(__UpperCamelCase )
return records
@add_end_docstrings(lowercase_ )
class UpperCamelCase ( lowercase_ ):
lowercase = 'summary'
def __call__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
return super().__call__(*__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> bool:
'''simple docstring'''
if max_length < min_length:
logger.warning(f'''Your min_length={min_length} must be inferior than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
f'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
'a summarization task, where outputs shorter than the input are typically wanted, you might '
f'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(lowercase_ )
class UpperCamelCase ( lowercase_ ):
lowercase = 'translation'
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
f'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
'increasing your max_length manually, e.g. translator(\'...\', max_length=400)' )
return True
def _UpperCAmelCase ( self ,*__UpperCamelCase ,__UpperCamelCase=TruncationStrategy.DO_NOT_TRUNCATE ,__UpperCamelCase=None ,__UpperCamelCase=None ) -> int:
'''simple docstring'''
if getattr(self.tokenizer ,'_build_translation_inputs' ,__UpperCamelCase ):
return self.tokenizer._build_translation_inputs(
*__UpperCamelCase ,return_tensors=self.framework ,truncation=__UpperCamelCase ,src_lang=__UpperCamelCase ,tgt_lang=__UpperCamelCase )
else:
return super()._parse_and_tokenize(*__UpperCamelCase ,truncation=__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,**__UpperCamelCase ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ : int = super()._sanitize_parameters(**__UpperCamelCase )
if src_lang is not None:
lowercase_ : str = src_lang
if tgt_lang is not None:
lowercase_ : Optional[Any] = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
lowercase_ : Tuple = kwargs.get('task' ,self.task )
lowercase_ : List[str] = task.split('_' )
if task and len(__UpperCamelCase ) == 4:
# translation, XX, to YY
lowercase_ : Union[str, Any] = items[1]
lowercase_ : Tuple = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
return super().__call__(*__UpperCamelCase ,**__UpperCamelCase )
| 213
| 0
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowerCAmelCase__ ( __lowercase ):
a__ : Optional[int] = """"""
a__ : List[str] = """hf-legacy""" # "hf://"" is reserved for hffs
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[DatasetInfo] = None , SCREAMING_SNAKE_CASE__ : Optional[str] = None , **SCREAMING_SNAKE_CASE__ : Tuple , ) -> List[Any]:
super().__init__(self , **SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = repo_info
__lowerCamelCase = token
__lowerCamelCase = None
def __A ( self : int ) -> List[Any]:
if self.dir_cache is None:
__lowerCamelCase = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__lowerCamelCase = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(SCREAMING_SNAKE_CASE__ ): {'''name''': str(SCREAMING_SNAKE_CASE__ ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str = "rb" , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Tuple:
if not isinstance(self.repo_info , SCREAMING_SNAKE_CASE__ ):
raise NotImplementedError(f'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
__lowerCamelCase = hf_hub_url(self.repo_info.id , SCREAMING_SNAKE_CASE__ , revision=self.repo_info.sha )
return fsspec.open(
SCREAMING_SNAKE_CASE__ , mode=SCREAMING_SNAKE_CASE__ , headers=get_authentication_headers_for_url(SCREAMING_SNAKE_CASE__ , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]:
self._get_dirs()
__lowerCamelCase = self._strip_protocol(SCREAMING_SNAKE_CASE__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(SCREAMING_SNAKE_CASE__ )
def __A ( self : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict=False , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Any:
self._get_dirs()
__lowerCamelCase = PurePosixPath(path.strip('''/''' ) )
__lowerCamelCase = {}
for p, f in self.dir_cache.items():
__lowerCamelCase = PurePosixPath(p.strip('''/''' ) )
__lowerCamelCase = p.parent
if root == path:
__lowerCamelCase = f
__lowerCamelCase = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
| 364
|
from __future__ import annotations
def __magic_name__ ( __lowerCAmelCase : list , __lowerCAmelCase : int | None = None , __lowerCAmelCase : int | None = None ) -> None:
if start is None:
__lowerCamelCase = 0
if end is None:
__lowerCamelCase = len(__lowerCAmelCase ) - 1
if start >= end:
return
__lowerCamelCase = (start + end) // 2
slowsort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
slowsort(__lowerCAmelCase , mid + 1 , __lowerCAmelCase )
if sequence[end] < sequence[mid]:
__lowerCamelCase , __lowerCamelCase = sequence[mid], sequence[end]
slowsort(__lowerCAmelCase , __lowerCAmelCase , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 339
| 0
|
'''simple docstring'''
import unittest
from transformers import DonutProcessor
lowerCamelCase : Tuple = 'naver-clova-ix/donut-base'
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = DonutProcessor.from_pretrained(UpperCamelCase )
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
lowercase__ = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
lowercase__ = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
lowercase__ = self.processor.tokenajson(UpperCamelCase )
self.assertDictEqual(UpperCamelCase , UpperCamelCase )
| 2
|
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
_snake_case : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
_snake_case : list[int] = [ord(letter) for letter in string.ascii_lowercase]
_snake_case : set[int] = {ord(char) for char in VALID_CHARS}
_snake_case : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def a_ ( lowerCAmelCase_ : list[int], lowerCAmelCase_ : tuple[int, ...] ):
__lowerCAmelCase = ""
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = 42
for keychar, cipherchar in zip(cycle(lowerCAmelCase_ ), lowerCAmelCase_ ):
__lowerCAmelCase = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(lowerCAmelCase_ )
return decoded
def a_ ( lowerCAmelCase_ : list[int] ):
__lowerCAmelCase = []
for key in product(lowerCAmelCase_, repeat=3 ):
__lowerCAmelCase = try_key(lowerCAmelCase_, lowerCAmelCase_ )
if encoded is not None:
possibles.append(lowerCAmelCase_ )
return possibles
def a_ ( lowerCAmelCase_ : list[str], lowerCAmelCase_ : str ):
return [possible for possible in possibles if common_word in possible.lower()]
def a_ ( lowerCAmelCase_ : str = "p059_cipher.txt" ):
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = Path(lowerCAmelCase_ ).parent.joinpath(lowerCAmelCase_ ).read_text(encoding='utf-8' )
__lowerCAmelCase = [int(lowerCAmelCase_ ) for number in data.strip().split(',' )]
__lowerCAmelCase = filter_valid_chars(lowerCAmelCase_ )
for common_word in COMMON_WORDS:
__lowerCAmelCase = filter_common_word(lowerCAmelCase_, lowerCAmelCase_ )
if len(lowerCAmelCase_ ) == 1:
break
__lowerCAmelCase = possibles[0]
return sum(ord(lowerCAmelCase_ ) for char in decoded_text )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 284
| 0
|
from __future__ import annotations
def lowerCAmelCase__ ( lowerCamelCase_ : str ,lowerCamelCase_ : list[str] | None = None ,lowerCamelCase_ : dict[str, float] | None = None ,lowerCamelCase_ : bool = False ,):
'''simple docstring'''
lowerCAmelCase__ : str = cipher_alphabet or [chr(lowerCamelCase_) for i in range(97 ,123)]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
lowerCAmelCase__ : str = {
'''a''': 0.08497,
'''b''': 0.01492,
'''c''': 0.02202,
'''d''': 0.04253,
'''e''': 0.11162,
'''f''': 0.02228,
'''g''': 0.02015,
'''h''': 0.06094,
'''i''': 0.07546,
'''j''': 0.00153,
'''k''': 0.01292,
'''l''': 0.04025,
'''m''': 0.02406,
'''n''': 0.06749,
'''o''': 0.07507,
'''p''': 0.01929,
'''q''': 0.00095,
'''r''': 0.07587,
'''s''': 0.06327,
'''t''': 0.09356,
'''u''': 0.02758,
'''v''': 0.00978,
'''w''': 0.02560,
'''x''': 0.00150,
'''y''': 0.01994,
'''z''': 0.00077,
}
else:
# Custom frequencies dictionary
lowerCAmelCase__ : Any = frequencies_dict
if not case_sensitive:
lowerCAmelCase__ : Dict = ciphertext.lower()
# Chi squared statistic values
lowerCAmelCase__ : dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(lowerCamelCase_)):
lowerCAmelCase__ : List[Any] = ''''''
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
lowerCAmelCase__ : Any = (alphabet_letters.index(letter.lower()) - shift) % len(
lowerCamelCase_)
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
lowerCAmelCase__ : Union[str, Any] = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
lowerCAmelCase__ : int = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
lowerCAmelCase__ : Any = decrypted_with_shift.lower().count(lowerCamelCase_)
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
lowerCAmelCase__ : str = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
lowerCAmelCase__ : List[str] = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
lowerCAmelCase__ : int = decrypted_with_shift.count(lowerCamelCase_)
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
lowerCAmelCase__ : int = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
lowerCAmelCase__ : Dict = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
lowerCAmelCase__ : str = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(lowerCamelCase_ : int) -> tuple[float, str]:
return chi_squared_statistic_values[key]
lowerCAmelCase__ : int = min(
lowerCamelCase_ ,key=lowerCamelCase_ ,)
# Get all the data from the most likely cipher (key, decoded message)
(
lowerCAmelCase__
) : List[Any] = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
| 361
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def lowerCAmelCase__ ( lowerCamelCase_ : Dict ,lowerCamelCase_ : Optional[int] ,lowerCamelCase_ : List[Any]=1024 ,lowerCamelCase_ : int=1024 ,lowerCamelCase_ : Dict=False ,**lowerCamelCase_ : Tuple):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(lowerCamelCase_)
lowerCAmelCase__ : Optional[Any] = SeqaSeqDataset(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,type_path='''train''' ,**lowerCamelCase_)
lowerCAmelCase__ : int = tok.pad_token_id
def get_lens(lowerCamelCase_ : Tuple):
lowerCAmelCase__ : Tuple = tqdm(
DataLoader(lowerCamelCase_ ,batch_size=512 ,num_workers=8 ,shuffle=lowerCamelCase_ ,collate_fn=ds.collate_fn) ,desc=str(ds.len_file) ,)
lowerCAmelCase__ : Tuple = []
for batch in dl:
lowerCAmelCase__ : Dict = batch['''input_ids'''].ne(lowerCamelCase_).sum(1).tolist()
lowerCAmelCase__ : Dict = batch['''labels'''].ne(lowerCamelCase_).sum(1).tolist()
if consider_target:
for src, tgt in zip(lowerCamelCase_ ,lowerCamelCase_):
max_lens.append(max(lowerCamelCase_ ,lowerCamelCase_))
else:
max_lens.extend(lowerCamelCase_)
return max_lens
lowerCAmelCase__ : str = get_lens(lowerCamelCase_)
lowerCAmelCase__ : Tuple = SeqaSeqDataset(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,type_path='''val''' ,**lowerCamelCase_)
lowerCAmelCase__ : Optional[int] = get_lens(lowerCamelCase_)
pickle_save(lowerCamelCase_ ,train_ds.len_file)
pickle_save(lowerCamelCase_ ,val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
| 94
| 0
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = SwinConfig(image_size=192 )
if "base" in model_name:
__lowerCAmelCase = 6
__lowerCAmelCase = 128
__lowerCAmelCase = (2, 2, 18, 2)
__lowerCAmelCase = (4, 8, 16, 32)
elif "large" in model_name:
__lowerCAmelCase = 12
__lowerCAmelCase = 192
__lowerCAmelCase = (2, 2, 18, 2)
__lowerCAmelCase = (6, 12, 24, 48)
else:
raise ValueError("Model not supported, only supports base and large variants" )
__lowerCAmelCase = window_size
__lowerCAmelCase = embed_dim
__lowerCAmelCase = depths
__lowerCAmelCase = num_heads
return config
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if "encoder.mask_token" in name:
__lowerCAmelCase = name.replace("encoder.mask_token" , "embeddings.mask_token" )
if "encoder.patch_embed.proj" in name:
__lowerCAmelCase = name.replace("encoder.patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "encoder.patch_embed.norm" in name:
__lowerCAmelCase = name.replace("encoder.patch_embed.norm" , "embeddings.norm" )
if "attn.proj" in name:
__lowerCAmelCase = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
__lowerCAmelCase = name.replace("attn" , "attention.self" )
if "norm1" in name:
__lowerCAmelCase = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
__lowerCAmelCase = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
__lowerCAmelCase = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__lowerCAmelCase = name.replace("mlp.fc2" , "output.dense" )
if name == "encoder.norm.weight":
__lowerCAmelCase = "layernorm.weight"
if name == "encoder.norm.bias":
__lowerCAmelCase = "layernorm.bias"
if "decoder" in name:
pass
else:
__lowerCAmelCase = "swin." + name
return name
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__lowerCAmelCase = orig_state_dict.pop(_UpperCamelCase )
if "attn_mask" in key:
pass
elif "qkv" in key:
__lowerCAmelCase = key.split("." )
__lowerCAmelCase = int(key_split[2] )
__lowerCAmelCase = int(key_split[4] )
__lowerCAmelCase = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[
dim : dim * 2, :
]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val[
:dim
]
__lowerCAmelCase = val[
dim : dim * 2
]
__lowerCAmelCase = val[
-dim:
]
else:
__lowerCAmelCase = val
return orig_state_dict
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = torch.load(_UpperCamelCase , map_location="cpu" )["model"]
__lowerCAmelCase = get_swin_config(_UpperCamelCase )
__lowerCAmelCase = SwinForMaskedImageModeling(_UpperCamelCase )
model.eval()
__lowerCAmelCase = convert_state_dict(_UpperCamelCase , _UpperCamelCase )
model.load_state_dict(_UpperCamelCase )
__lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowerCAmelCase = ViTImageProcessor(size={"height": 192, "width": 192} )
__lowerCAmelCase = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
__lowerCAmelCase = image_processor(images=_UpperCamelCase , return_tensors="pt" )
with torch.no_grad():
__lowerCAmelCase = model(**_UpperCamelCase ).logits
print(outputs.keys() )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCamelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_UpperCamelCase )
if push_to_hub:
print(f"Pushing model and image processor for {model_name} to hub" )
model.push_to_hub(f"microsoft/{model_name}" )
image_processor.push_to_hub(f"microsoft/{model_name}" )
if __name__ == "__main__":
A : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="swin-base-simmim-window6-192",
type=str,
choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
help="Name of the Swin SimMIM model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
A : Optional[int] = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 57
|
'''simple docstring'''
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : Optional[int] = int(__SCREAMING_SNAKE_CASE )
if decimal in (0, 1): # Exit cases for the recursion
return str(__SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : List[str] = divmod(__SCREAMING_SNAKE_CASE , 2 )
return binary_recursive(__SCREAMING_SNAKE_CASE ) + str(__SCREAMING_SNAKE_CASE )
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : str = str(__SCREAMING_SNAKE_CASE ).strip()
if not number:
raise ValueError('''No input value was provided''' )
lowercase_ : Optional[int] = '''-''' if number.startswith('''-''' ) else ''''''
lowercase_ : Union[str, Any] = number.lstrip('''-''' )
if not number.isnumeric():
raise ValueError('''Input value is not an integer''' )
return F'''{negative}0b{binary_recursive(int(__SCREAMING_SNAKE_CASE ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
| 93
| 0
|
"""simple docstring"""
from __future__ import annotations
__lowercase = 8.988e9 # units = N * m^s * C^-2
def lowerCAmelCase (__UpperCamelCase : float , __UpperCamelCase : float , __UpperCamelCase : float , __UpperCamelCase : float ):
"""simple docstring"""
__UpperCamelCase =abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if distance < 0:
raise ValueError('''Distance cannot be negative''' )
if force == 0:
__UpperCamelCase =COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
__UpperCamelCase =abs(__UpperCamelCase ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
__UpperCamelCase =abs(__UpperCamelCase ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
__UpperCamelCase =(COULOMBS_CONSTANT * charge_product / abs(__UpperCamelCase )) ** 0.5
return {"distance": distance}
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowercase = {
'''configuration_conditional_detr''': [
'''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ConditionalDetrConfig''',
'''ConditionalDetrOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ['''ConditionalDetrFeatureExtractor''']
__lowercase = ['''ConditionalDetrImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConditionalDetrForObjectDetection''',
'''ConditionalDetrForSegmentation''',
'''ConditionalDetrModel''',
'''ConditionalDetrPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 85
| 0
|
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
lowerCamelCase = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def lowerCamelCase_ ( _a ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = ['''layers''', '''blocks''']
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def lowerCamelCase_ ( _a ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = list(s_dict.keys() )
for key in keys:
lowerCAmelCase__ : Optional[Any] = key
for k, v in WHISPER_MAPPING.items():
if k in key:
lowerCAmelCase__ : Optional[Any] = new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print(f'{key} -> {new_key}' )
lowerCAmelCase__ : Tuple = s_dict.pop(SCREAMING_SNAKE_CASE__ )
return s_dict
def lowerCamelCase_ ( _a ):
"""simple docstring"""
lowerCAmelCase__ : Any = emb.weight.shape
lowerCAmelCase__ : List[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ : Optional[Any] = emb.weight.data
return lin_layer
def lowerCamelCase_ ( _a , _a ):
"""simple docstring"""
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ : Tuple = os.path.basename(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ : Tuple = url.split('''/''' )[-2]
lowerCAmelCase__ : List[str] = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if os.path.exists(SCREAMING_SNAKE_CASE__ ) and not os.path.isfile(SCREAMING_SNAKE_CASE__ ):
raise RuntimeError(f'{download_target} exists and is not a regular file' )
if os.path.isfile(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase__ : List[Any] = open(SCREAMING_SNAKE_CASE__ , '''rb''' ).read()
if hashlib.shaaaa(SCREAMING_SNAKE_CASE__ ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(f'{download_target} exists, but the SHA256 checksum does not match; re-downloading the file' )
with urllib.request.urlopen(SCREAMING_SNAKE_CASE__ ) as source, open(SCREAMING_SNAKE_CASE__ , '''wb''' ) as output:
with tqdm(
total=int(source.info().get('''Content-Length''' ) ) , ncols=80 , unit='''iB''' , unit_scale=SCREAMING_SNAKE_CASE__ , unit_divisor=1_024 ) as loop:
while True:
lowerCAmelCase__ : Tuple = source.read(8_192 )
if not buffer:
break
output.write(SCREAMING_SNAKE_CASE__ )
loop.update(len(SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase__ : List[str] = open(SCREAMING_SNAKE_CASE__ , '''rb''' ).read()
if hashlib.shaaaa(SCREAMING_SNAKE_CASE__ ).hexdigest() != expected_shaaaa:
raise RuntimeError(
'''Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.''' )
return model_bytes
def lowerCamelCase_ ( _a , _a ):
"""simple docstring"""
if ".pt" not in checkpoint_path:
lowerCAmelCase__ : Any = _download(_MODELS[checkpoint_path] )
else:
lowerCAmelCase__ : Tuple = torch.load(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' )
lowerCAmelCase__ : Union[str, Any] = original_checkpoint['''dims''']
lowerCAmelCase__ : List[str] = original_checkpoint['''model_state_dict''']
lowerCAmelCase__ : List[Any] = state_dict['''decoder.token_embedding.weight''']
remove_ignore_keys_(SCREAMING_SNAKE_CASE__ )
rename_keys(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ : Dict = True
lowerCAmelCase__ : Optional[int] = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
lowerCAmelCase__ : str = WhisperConfig(
vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=SCREAMING_SNAKE_CASE__ , decoder_ffn_dim=SCREAMING_SNAKE_CASE__ , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_state'''] , max_source_positions=dimensions['''n_audio_ctx'''] , )
lowerCAmelCase__ : Tuple = WhisperForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ : str = model.model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0 and not set(SCREAMING_SNAKE_CASE__ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
f' but all the following weights are missing {missing}' )
if tie_embeds:
lowerCAmelCase__ : Union[str, Any] = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
lowerCAmelCase__ : str = proj_out_weights
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowerCamelCase = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 131
|
def _A ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : list[int] ):
UpperCamelCase :Tuple = len(SCREAMING_SNAKE_CASE__ )
print('''The following activities are selected:''' )
# The first activity is always selected
UpperCamelCase :Dict = 0
print(SCREAMING_SNAKE_CASE__ , end=''',''' )
# Consider rest of the activities
for j in range(SCREAMING_SNAKE_CASE__ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(SCREAMING_SNAKE_CASE__ , end=''',''' )
UpperCamelCase :List[str] = j
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case = [1, 3, 0, 5, 8, 5]
__snake_case = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 259
| 0
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""image_processor""", """tokenizer"""]
_SCREAMING_SNAKE_CASE = """LayoutLMv2ImageProcessor"""
_SCREAMING_SNAKE_CASE = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
def __init__( self : str , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : Tuple=None , **SCREAMING_SNAKE_CASE_ : List[Any] ):
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase_ : Dict = kwargs.pop('feature_extractor' )
lowerCAmelCase_ : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __call__( self : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , SCREAMING_SNAKE_CASE_ : Union[List[List[int]], List[List[List[int]]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[List[int], List[List[int]]]] = None , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Union[bool, str, PaddingStrategy] = False , SCREAMING_SNAKE_CASE_ : Union[bool, str, TruncationStrategy] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , **SCREAMING_SNAKE_CASE_ : Tuple , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
# first, apply the image processor
lowerCAmelCase_ : Optional[Any] = self.image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase_ : Dict = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCAmelCase_ : str = features['words']
lowerCAmelCase_ : str = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_overflowing_tokens=SCREAMING_SNAKE_CASE_ , return_special_tokens_mask=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , return_length=SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# add pixel values
lowerCAmelCase_ : int = features.pop('pixel_values' )
if return_overflowing_tokens is True:
lowerCAmelCase_ : Tuple = self.get_overflowing_images(SCREAMING_SNAKE_CASE_ , encoded_inputs['overflow_to_sample_mapping'] )
lowerCAmelCase_ : Tuple = images
return encoded_inputs
def SCREAMING_SNAKE_CASE__ ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
lowerCAmelCase_ : List[Any] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
F" {len(SCREAMING_SNAKE_CASE_ )} and {len(SCREAMING_SNAKE_CASE_ )}" )
return images_with_overflow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : Tuple ):
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , SCREAMING_SNAKE_CASE_ , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self : int ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , SCREAMING_SNAKE_CASE_ , )
return self.image_processor
| 289
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : List[str] = {
"""s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""",
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """open-llama"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple=1_0_0_0_0_0 , SCREAMING_SNAKE_CASE_ : Optional[int]=4_0_9_6 , SCREAMING_SNAKE_CASE_ : List[Any]=1_1_0_0_8 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3_2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=3_2 , SCREAMING_SNAKE_CASE_ : str="silu" , SCREAMING_SNAKE_CASE_ : Optional[Any]=2_0_4_8 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.02 , SCREAMING_SNAKE_CASE_ : int=1E-6 , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : Dict=0 , SCREAMING_SNAKE_CASE_ : Dict=1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=2 , SCREAMING_SNAKE_CASE_ : str=False , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Tuple=None , **SCREAMING_SNAKE_CASE_ : Optional[int] , ):
lowerCAmelCase_ : Union[str, Any] = vocab_size
lowerCAmelCase_ : int = max_position_embeddings
lowerCAmelCase_ : Union[str, Any] = hidden_size
lowerCAmelCase_ : Optional[Any] = intermediate_size
lowerCAmelCase_ : List[Any] = num_hidden_layers
lowerCAmelCase_ : Optional[int] = num_attention_heads
lowerCAmelCase_ : List[Any] = hidden_act
lowerCAmelCase_ : List[Any] = initializer_range
lowerCAmelCase_ : List[str] = rms_norm_eps
lowerCAmelCase_ : List[Any] = use_cache
lowerCAmelCase_ : Optional[int] = kwargs.pop(
'use_memorry_efficient_attention' , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = hidden_dropout_prob
lowerCAmelCase_ : List[str] = attention_dropout_prob
lowerCAmelCase_ : Tuple = use_stable_embedding
lowerCAmelCase_ : Optional[Any] = shared_input_output_embedding
lowerCAmelCase_ : Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , tie_word_embeddings=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , SCREAMING_SNAKE_CASE_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
'`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '
F"got {self.rope_scaling}" )
lowerCAmelCase_ : int = self.rope_scaling.get('type' , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = self.rope_scaling.get('factor' , SCREAMING_SNAKE_CASE_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or rope_scaling_factor <= 1.0:
raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 289
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowerCamelCase : List[str] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_lowerCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28
|
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any=1_3 , UpperCamelCase__ : Optional[int]=3_2 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : str=[1_0, 2_0, 3_0, 4_0] , UpperCamelCase__ : str=[2, 2, 3, 2] , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=3_7 , UpperCamelCase__ : Union[str, Any]="gelu" , UpperCamelCase__ : Dict=1_0 , UpperCamelCase__ : Union[str, Any]=0.0_2 , UpperCamelCase__ : int=["stage2", "stage3", "stage4"] , UpperCamelCase__ : List[str]=[2, 3, 4] , UpperCamelCase__ : Any=None , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = num_channels
UpperCamelCase = num_stages
UpperCamelCase = hidden_sizes
UpperCamelCase = depths
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = num_labels
UpperCamelCase = initializer_range
UpperCamelCase = out_features
UpperCamelCase = out_indices
UpperCamelCase = scope
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def A ( self : List[str] ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str ):
"""simple docstring"""
UpperCamelCase = ConvNextModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def A ( self : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int ):
"""simple docstring"""
UpperCamelCase = ConvNextForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str ):
"""simple docstring"""
UpperCamelCase = ConvNextBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCamelCase = None
UpperCamelCase = ConvNextBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = (
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = ConvNextModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=3_7 )
def A ( self : List[str] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self : Optional[int] ):
"""simple docstring"""
return
@unittest.skip(reason='ConvNext does not use inputs_embeds' )
def A ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason='ConvNext does not support input and output embeddings' )
def A ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='ConvNext does not use feedforward chunking' )
def A ( self : Optional[int] ):
"""simple docstring"""
pass
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(UpperCamelCase__ )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase__ )
def A ( self : Optional[Any] ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple ):
UpperCamelCase = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def A ( self : Dict ):
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = ConvNextModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A ( self : Optional[Any] ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None
@slow
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(UpperCamelCase__ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**UpperCamelCase__ )
# verify the logits
UpperCamelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
UpperCamelCase = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase , _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (ConvNextBackbone,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE = ConvNextConfig
_SCREAMING_SNAKE_CASE = False
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = ConvNextModelTester(self )
| 28
| 1
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class __lowerCAmelCase ( lowerCamelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Tuple = 'unispeech-sat'
def __init__( self , _a=32 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.0 , _a=0.1 , _a=0.1 , _a=0.02 , _a=1E-5 , _a="group" , _a="gelu" , _a=(512, 512, 512, 512, 512, 512, 512) , _a=(5, 2, 2, 2, 2, 2, 2) , _a=(10, 3, 3, 3, 3, 2, 2) , _a=False , _a=128 , _a=16 , _a=False , _a=True , _a=0.05 , _a=10 , _a=2 , _a=0.0 , _a=10 , _a=0 , _a=320 , _a=2 , _a=0.1 , _a=100 , _a=256 , _a=256 , _a=0.1 , _a="mean" , _a=False , _a=False , _a=256 , _a=(512, 512, 512, 512, 1_500) , _a=(5, 3, 3, 1, 1) , _a=(1, 2, 3, 1, 1) , _a=512 , _a=0 , _a=1 , _a=2 , _a=504 , **_a , ):
super().__init__(**__lowerCamelCase , pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase )
__a = hidden_size
__a = feat_extract_norm
__a = feat_extract_activation
__a = list(__lowerCamelCase )
__a = list(__lowerCamelCase )
__a = list(__lowerCamelCase )
__a = conv_bias
__a = num_conv_pos_embeddings
__a = num_conv_pos_embedding_groups
__a = len(self.conv_dim )
__a = num_hidden_layers
__a = intermediate_size
__a = hidden_act
__a = num_attention_heads
__a = hidden_dropout
__a = attention_dropout
__a = activation_dropout
__a = feat_proj_dropout
__a = final_dropout
__a = layerdrop
__a = layer_norm_eps
__a = initializer_range
__a = vocab_size
__a = num_clusters
__a = do_stable_layer_norm
__a = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__a = apply_spec_augment
__a = mask_time_prob
__a = mask_time_length
__a = mask_time_min_masks
__a = mask_feature_prob
__a = mask_feature_length
__a = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__a = num_codevectors_per_group
__a = num_codevector_groups
__a = contrastive_logits_temperature
__a = feat_quantizer_dropout
__a = num_negatives
__a = codevector_dim
__a = proj_codevector_dim
__a = diversity_loss_weight
# ctc loss
__a = ctc_loss_reduction
__a = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__a = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__a = list(__lowerCamelCase )
__a = list(__lowerCamelCase )
__a = list(__lowerCamelCase )
__a = xvector_output_dim
@property
def __UpperCAmelCase ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 355
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
__a = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_a )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sgugger/tiny-distilbert-classification'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , only_pretrain_model=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , torchscript=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , fpaa=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
# set architectures equal to `None`
__a = None
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_a , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tinier_bart'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tinier_bart'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , save_to_csv=_a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_a , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_a , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_a , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_a , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_a , '''env.csv''' ) , multi_process=_a , )
__a = PyTorchBenchmark(_a )
benchmark.run()
self.assertTrue(Path(os.path.join(_a , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''env.csv''' ) ).exists() )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_a ):
self.assertTrue(hasattr(_a , '''sequential''' ) )
self.assertTrue(hasattr(_a , '''cumulative''' ) )
self.assertTrue(hasattr(_a , '''current''' ) )
self.assertTrue(hasattr(_a , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_a , '''log.txt''' ) , log_print=_a , trace_memory_line_by_line=_a , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_a , '''log.txt''' ) ).exists() )
| 11
| 0
|
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
lowerCamelCase__ = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
lowerCamelCase__ = [0, 25, 50]
lowerCamelCase__ = [25, 50, 75]
lowerCamelCase__ = fuzz.membership.trimf(X, abca)
lowerCamelCase__ = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
lowerCamelCase__ = np.ones(75)
lowerCamelCase__ = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
lowerCamelCase__ = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
lowerCamelCase__ = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
lowerCamelCase__ = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
lowerCamelCase__ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
lowerCamelCase__ = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
lowerCamelCase__ = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
lowerCamelCase__ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
lowerCamelCase__ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 234
|
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowerCamelCase__ = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
lowerCamelCase__ = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase=False ):
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = create_model(
"HTSAT-tiny" , "roberta" , __lowerCAmelCase , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=__lowerCAmelCase , fusion_type="aff_2d" if enable_fusion else None , )
return model, model_cfg
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Dict = {}
_UpperCAmelCase : str = R".*sequential.(\d+).*"
_UpperCAmelCase : Any = R".*_projection.(\d+).*"
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_UpperCAmelCase : Union[str, Any] = key.replace(__lowerCAmelCase , __lowerCAmelCase )
if re.match(__lowerCAmelCase , __lowerCAmelCase ):
# replace sequential layers with list
_UpperCAmelCase : List[Any] = re.match(__lowerCAmelCase , __lowerCAmelCase ).group(1 )
_UpperCAmelCase : Optional[int] = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(__lowerCAmelCase )//3}.linear.""" )
elif re.match(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : List[str] = int(re.match(__lowerCAmelCase , __lowerCAmelCase ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
_UpperCAmelCase : str = 1 if projecton_layer == 0 else 2
_UpperCAmelCase : Tuple = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" )
if "audio" and "qkv" in key:
# split qkv into query key and value
_UpperCAmelCase : List[str] = value
_UpperCAmelCase : Tuple = mixed_qkv.size(0 ) // 3
_UpperCAmelCase : Union[str, Any] = mixed_qkv[:qkv_dim]
_UpperCAmelCase : int = mixed_qkv[qkv_dim : qkv_dim * 2]
_UpperCAmelCase : Optional[int] = mixed_qkv[qkv_dim * 2 :]
_UpperCAmelCase : List[Any] = query_layer
_UpperCAmelCase : int = key_layer
_UpperCAmelCase : Any = value_layer
else:
_UpperCAmelCase : Dict = value
return model_state_dict
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ):
_UpperCAmelCase , _UpperCAmelCase : List[str] = init_clap(__lowerCAmelCase , enable_fusion=__lowerCAmelCase )
clap_model.eval()
_UpperCAmelCase : List[str] = clap_model.state_dict()
_UpperCAmelCase : str = rename_state_dict(__lowerCAmelCase )
_UpperCAmelCase : List[Any] = ClapConfig()
_UpperCAmelCase : str = enable_fusion
_UpperCAmelCase : Union[str, Any] = ClapModel(__lowerCAmelCase )
# ignore the spectrogram embedding layer
model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
transformers_config.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
lowerCamelCase__ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 234
| 1
|
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def lowerCamelCase__ ( a , a=() , a=None , a="no" , a="29500" ) -> int:
_A: Dict = False
_A: Dict = False
if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
_A: Any = True
elif "IPython" in sys.modules:
_A: Any = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
try:
_A: Tuple = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )
if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , a ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if num_processes is None:
_A: Dict = 8
_A: Tuple = PrepareForLaunch(a , distributed_type='''TPU''' )
print(f"""Launching a training on {num_processes} TPU cores.""" )
xmp.spawn(a , args=a , nprocs=a , start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
function(*a )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=a , master_addr='''127.0.01''' , master_port=a , mixed_precision=a ):
_A: Optional[Any] = PrepareForLaunch(a , distributed_type='''MULTI_GPU''' )
print(f"""Launching training on {num_processes} GPUs.""" )
try:
start_processes(a , args=a , nprocs=a , start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
_A: Any = '''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
function(*a )
def lowerCamelCase__ ( a , a=() , a=2 ) -> List[str]:
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=a , master_addr='''127.0.01''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ):
_A: int = PrepareForLaunch(a , debug=a )
start_processes(a , args=a , nprocs=a , start_method='''fork''' )
| 301
|
import os
from pathlib import Path
def lowerCamelCase__ ( ) -> Optional[Any]:
from torch.utils.cpp_extension import load
_A: str = Path(a ).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
_A: Tuple = [
root / filename
for filename in [
'''vision.cpp''',
os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
]
]
load(
'''MultiScaleDeformableAttention''' , a , with_cuda=a , extra_include_paths=[str(a )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
'''-DCUDA_HAS_FP16=1''',
'''-D__CUDA_NO_HALF_OPERATORS__''',
'''-D__CUDA_NO_HALF_CONVERSIONS__''',
'''-D__CUDA_NO_HALF2_OPERATORS__''',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 301
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : List[str] = logging.get_logger(__name__)
UpperCAmelCase__ : List[Any] = '▁'
UpperCAmelCase__ : Dict = {'vocab_file': 'sentencepiece.bpe.model'}
UpperCAmelCase__ : Union[str, Any] = {
'vocab_file': {
'facebook/mbart-large-50-one-to-many-mmt': (
'https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'
),
}
}
UpperCAmelCase__ : Dict = {
'facebook/mbart-large-50-one-to-many-mmt': 1024,
}
# fmt: off
UpperCAmelCase__ : str = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN', 'af_ZA', 'az_AZ', 'bn_IN', 'fa_IR', 'he_IL', 'hr_HR', 'id_ID', 'ka_GE', 'km_KH', 'mk_MK', 'ml_IN', 'mn_MN', 'mr_IN', 'pl_PL', 'ps_AF', 'pt_XX', 'sv_SE', 'sw_KE', 'ta_IN', 'te_IN', 'th_TH', 'tl_XX', 'uk_UA', 'ur_PK', 'xh_ZA', 'gl_ES', 'sl_SI']
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : List[Any] = VOCAB_FILES_NAMES
__UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
__UpperCamelCase : List[int] = []
__UpperCamelCase : List[int] = []
def __init__( self : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Any="</s>" , lowerCAmelCase_ : Optional[Any]="</s>" , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : int="<unk>" , lowerCAmelCase_ : List[Any]="<pad>" , lowerCAmelCase_ : List[Any]="<mask>" , lowerCAmelCase_ : Optional[Dict[str, Any]] = None , **lowerCAmelCase_ : List[str] , ):
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
_A: Dict = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token
_A: Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
_A: Dict = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowerCAmelCase_ , tgt_lang=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , )
_A: Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase_ ) )
_A: Optional[int] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_A: Optional[int] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_A: Optional[int] = 1
_A: Dict = len(self.sp_model )
_A: List[str] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase_ )
}
_A: Optional[int] = {v: k for k, v in self.lang_code_to_id.items()}
_A: Tuple = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_A: Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_A: str = src_lang if src_lang is not None else '''en_XX'''
_A: Dict = self.lang_code_to_id[self._src_lang]
_A: List[str] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __magic_name__ ( self : str ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __magic_name__ ( self : str ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def __magic_name__ ( self : Any , lowerCAmelCase_ : str ):
"""simple docstring"""
_A: Optional[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Union[str, Any] ):
"""simple docstring"""
_A: Tuple = self.__dict__.copy()
_A: int = None
return state
def __setstate__( self : str , lowerCAmelCase_ : Dict ):
"""simple docstring"""
_A: Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_A: Dict = {}
_A: Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
_A: Optional[Any] = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __magic_name__ ( self : Dict , lowerCAmelCase_ : str ):
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_ )
def __magic_name__ ( self : Dict , lowerCAmelCase_ : str ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_A: Dict = self.sp_model.PieceToId(lowerCAmelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __magic_name__ ( self : Any , lowerCAmelCase_ : int ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __magic_name__ ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
_A: List[Any] = []
_A: Union[str, Any] = ''''''
_A: List[str] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase_ ) + token
_A: Optional[Any] = True
_A: Dict = []
else:
current_sub_tokens.append(lowerCAmelCase_ )
_A: List[Any] = False
out_string += self.sp_model.decode(lowerCAmelCase_ )
return out_string.strip()
def __magic_name__ ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_A: Union[str, Any] = os.path.join(
lowerCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase_ , '''wb''' ) as fi:
_A: Optional[int] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase_ )
return (out_vocab_file,)
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
_A: List[str] = [1] * len(self.prefix_tokens )
_A: int = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase_ )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase_ )) + ([0] * len(lowerCAmelCase_ )) + suffix_ones
def __magic_name__ ( self : Optional[int] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __magic_name__ ( self : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] , lowerCAmelCase_ : Optional[str] , **lowerCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
_A: Union[str, Any] = src_lang
_A: List[Any] = self(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ )
_A: int = self.convert_tokens_to_ids(lowerCAmelCase_ )
_A: Optional[int] = tgt_lang_id
return inputs
def __magic_name__ ( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str = "en_XX" , lowerCAmelCase_ : Optional[List[str]] = None , lowerCAmelCase_ : str = "ro_RO" , **lowerCAmelCase_ : List[str] , ):
"""simple docstring"""
_A: Any = src_lang
_A: Dict = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
def __magic_name__ ( self : str ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __magic_name__ ( self : str , lowerCAmelCase_ : str ):
"""simple docstring"""
_A: Union[str, Any] = self.lang_code_to_id[src_lang]
_A: List[Any] = [self.cur_lang_code_id]
_A: Tuple = [self.eos_token_id]
def __magic_name__ ( self : Tuple , lowerCAmelCase_ : str ):
"""simple docstring"""
_A: Optional[Any] = self.lang_code_to_id[tgt_lang]
_A: List[Any] = [self.cur_lang_code_id]
_A: Dict = [self.eos_token_id]
| 121
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase__ : Any = logging.get_logger(__name__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Dict = '''maskformer-swin'''
__UpperCamelCase : Any = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : Optional[Any] , lowerCAmelCase_ : int=2_2_4 , lowerCAmelCase_ : Tuple=4 , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : Dict=9_6 , lowerCAmelCase_ : Union[str, Any]=[2, 2, 6, 2] , lowerCAmelCase_ : Optional[Any]=[3, 6, 1_2, 2_4] , lowerCAmelCase_ : Optional[Any]=7 , lowerCAmelCase_ : Optional[Any]=4.0 , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Optional[Any]=0.0 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Optional[Any]="gelu" , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : Dict=0.02 , lowerCAmelCase_ : str=1e-5 , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Any=None , **lowerCAmelCase_ : int , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_A: List[Any] = image_size
_A: Optional[int] = patch_size
_A: Optional[Any] = num_channels
_A: str = embed_dim
_A: Any = depths
_A: str = len(lowerCAmelCase_ )
_A: Any = num_heads
_A: int = window_size
_A: Dict = mlp_ratio
_A: str = qkv_bias
_A: List[str] = hidden_dropout_prob
_A: List[Any] = attention_probs_dropout_prob
_A: Dict = drop_path_rate
_A: List[Any] = hidden_act
_A: Optional[int] = use_absolute_embeddings
_A: Tuple = layer_norm_eps
_A: Union[str, Any] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_A: Any = int(embed_dim * 2 ** (len(lowerCAmelCase_ ) - 1) )
_A: Tuple = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(lowerCAmelCase_ ) + 1 )]
_A , _A: str = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , stage_names=self.stage_names )
| 121
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 217
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json''',
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Union[str, Any] = "gpt_neox_japanese"
def __init__(self ,_lowerCamelCase=32000 ,_lowerCamelCase=2560 ,_lowerCamelCase=32 ,_lowerCamelCase=32 ,_lowerCamelCase=4 ,_lowerCamelCase="gelu" ,_lowerCamelCase=1.0_0 ,_lowerCamelCase=10000 ,_lowerCamelCase=2048 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=1E-5 ,_lowerCamelCase=True ,_lowerCamelCase=31996 ,_lowerCamelCase=31999 ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.0 ,**_lowerCamelCase ,) -> Optional[int]:
'''simple docstring'''
super().__init__(bos_token_id=_lowerCamelCase ,eos_token_id=_lowerCamelCase ,**_lowerCamelCase )
__lowercase = vocab_size
__lowercase = max_position_embeddings
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_multiple_size
__lowercase = hidden_act
__lowercase = rotary_pct
__lowercase = rotary_emb_base
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = use_cache
__lowercase = attention_dropout
__lowercase = hidden_dropout
| 217
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :Dict = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
snake_case_ :str = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
snake_case_ :Dict = in_proj_weight[
: encoder_config.hidden_size, :
]
snake_case_ :Dict = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
snake_case_ :Tuple = in_proj_weight[
-encoder_config.hidden_size :, :
]
def A_ ( _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :List[Any] = dct.pop(_SCREAMING_SNAKE_CASE )
snake_case_ :Dict = val
def A_ ( _lowercase ):
'''simple docstring'''
if "handwritten" in checkpoint_url:
snake_case_ :Tuple = 'https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
snake_case_ :Any = 'https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'
snake_case_ :str = Image.open(requests.get(_SCREAMING_SNAKE_CASE, stream=_SCREAMING_SNAKE_CASE ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :List[Any] = ViTConfig(image_size=384, qkv_bias=_SCREAMING_SNAKE_CASE )
snake_case_ :Tuple = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
snake_case_ :int = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
snake_case_ :str = 1024
snake_case_ :Optional[int] = 4096
snake_case_ :Tuple = 24
snake_case_ :List[Any] = 16
snake_case_ :Optional[Any] = 1024
else:
raise ValueError("""Should either find \'base\' or \'large\' in checkpoint URL""" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
snake_case_ :Optional[Any] = False
snake_case_ :Optional[Any] = 'relu'
snake_case_ :Any = 1024
snake_case_ :List[Any] = True
snake_case_ :Dict = False
snake_case_ :Optional[Any] = False
# load HuggingFace model
snake_case_ :str = ViTModel(_SCREAMING_SNAKE_CASE, add_pooling_layer=_SCREAMING_SNAKE_CASE )
snake_case_ :int = TrOCRForCausalLM(_SCREAMING_SNAKE_CASE )
snake_case_ :List[Any] = VisionEncoderDecoderModel(encoder=_SCREAMING_SNAKE_CASE, decoder=_SCREAMING_SNAKE_CASE )
model.eval()
# load state_dict of original model, rename some keys
snake_case_ :Optional[Any] = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE, map_location="""cpu""", check_hash=_SCREAMING_SNAKE_CASE )['model']
snake_case_ :Union[str, Any] = create_rename_keys(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE )
read_in_q_k_v(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
snake_case_ :List[Any] = state_dict.pop(_SCREAMING_SNAKE_CASE )
if key.startswith("""decoder""" ) and "output_projection" not in key:
snake_case_ :List[Any] = val
else:
snake_case_ :Dict = val
# load state dict
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# Check outputs on an image
snake_case_ :Optional[Any] = ViTImageProcessor(size=encoder_config.image_size )
snake_case_ :Dict = RobertaTokenizer.from_pretrained("""roberta-large""" )
snake_case_ :List[str] = TrOCRProcessor(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE )
snake_case_ :Optional[Any] = processor(images=prepare_img(_SCREAMING_SNAKE_CASE ), return_tensors="""pt""" ).pixel_values
# verify logits
snake_case_ :List[str] = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
snake_case_ :List[Any] = model(pixel_values=_SCREAMING_SNAKE_CASE, decoder_input_ids=_SCREAMING_SNAKE_CASE )
snake_case_ :Dict = outputs.logits
snake_case_ :Dict = torch.Size([1, 1, 50265] )
if "trocr-base-handwritten" in checkpoint_url:
snake_case_ :Optional[Any] = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
snake_case_ :Tuple = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
snake_case_ :Dict = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
snake_case_ :Optional[Any] = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10], _SCREAMING_SNAKE_CASE, atol=1e-3 ), "First elements of logits not as expected"
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__a = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 66
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = 1_0
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = [1, 2, 3, 4]
lowerCAmelCase__ :Tuple = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(__UpperCAmelCase , self.block_size , 0 ) , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
lowerCAmelCase__ :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(__UpperCAmelCase , self.block_size , 0 ) , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3]
lowerCAmelCase__ :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(__UpperCAmelCase , self.block_size , 0 ) , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = process_story(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , [] )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = ''
lowerCAmelCase__ , lowerCAmelCase__ :Any = process_story(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , [] )
self.assertEqual(__UpperCAmelCase , [] )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
lowerCAmelCase__ , lowerCAmelCase__ :str = process_story(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :List[str] = ['It was the best of times.']
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = torch.tensor([1, 2, 3, 4] )
lowerCAmelCase__ :List[str] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(__UpperCAmelCase , 0 ).numpy() , expected.numpy() )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] )
lowerCAmelCase__ :Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__UpperCAmelCase , 2_3 ).numpy() , expected.numpy() )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
lowerCAmelCase__ :Optional[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__UpperCAmelCase , 1 ).numpy() , expected.numpy() )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = 1_0_1
lowerCAmelCase__ :str = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] )
lowerCAmelCase__ :Any = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
lowerCAmelCase__ :List[Any] = compute_token_type_ids(__UpperCAmelCase , __UpperCAmelCase )
np.testing.assert_array_equal(__UpperCAmelCase , __UpperCAmelCase )
| 293
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def __lowerCamelCase ( snake_case__=None ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser(add_help=snake_case__ ,allow_abbrev=snake_case__ )
# The main config parser
_SCREAMING_SNAKE_CASE = config_command_parser(snake_case__ )
# The subparser to add commands to
_SCREAMING_SNAKE_CASE = config_parser.add_subparsers(title="""subcommands""" ,dest="""subcommand""" )
# Then add other parsers with the parent parser
default_command_parser(snake_case__ ,parents=[parent_parser] )
update_command_parser(snake_case__ ,parents=[parent_parser] )
return config_parser
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = get_config_parser()
_SCREAMING_SNAKE_CASE = config_parser.parse_args()
if not hasattr(snake_case__ ,"""func""" ):
config_parser.print_help()
exit(1 )
# Run
args.func(snake_case__ )
if __name__ == "__main__":
main()
| 125
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase = {
'''configuration_layoutlmv3''': [
'''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LayoutLMv3Config''',
'''LayoutLMv3OnnxConfig''',
],
'''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
'''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''LayoutLMv3TokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv3ForQuestionAnswering''',
'''LayoutLMv3ForSequenceClassification''',
'''LayoutLMv3ForTokenClassification''',
'''LayoutLMv3Model''',
'''LayoutLMv3PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLayoutLMv3ForQuestionAnswering''',
'''TFLayoutLMv3ForSequenceClassification''',
'''TFLayoutLMv3ForTokenClassification''',
'''TFLayoutLMv3Model''',
'''TFLayoutLMv3PreTrainedModel''',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''LayoutLMv3FeatureExtractor''']
UpperCamelCase = ['''LayoutLMv3ImageProcessor''']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 125
| 1
|
"""simple docstring"""
from math import ceil
def _lowerCAmelCase ( UpperCamelCase_ = 1001 ):
__SCREAMING_SNAKE_CASE = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
__SCREAMING_SNAKE_CASE = 2 * i + 1
__SCREAMING_SNAKE_CASE = 2 * i
__SCREAMING_SNAKE_CASE = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
__magic_name__ = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number")
| 100
|
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class __UpperCAmelCase :
def __magic_name__ ( self : int, __A : Dict ):
raise NotImplementedError()
def __magic_name__ ( self : int ):
raise NotImplementedError()
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : str, __A : "AutoTokenizer", __A : bool = False, **__A : str ):
UpperCAmelCase : List[str] = tokenizer
UpperCAmelCase : str = skip_prompt
UpperCAmelCase : List[str] = decode_kwargs
# variables used in the streaming process
UpperCAmelCase : Dict = []
UpperCAmelCase : List[str] = 0
UpperCAmelCase : Union[str, Any] = True
def __magic_name__ ( self : Dict, __A : Optional[int] ):
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError('''TextStreamer only supports batch size 1''' )
elif len(value.shape ) > 1:
UpperCAmelCase : Union[str, Any] = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
UpperCAmelCase : Optional[int] = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
UpperCAmelCase : Any = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith('''\n''' ):
UpperCAmelCase : Union[str, Any] = text[self.print_len :]
UpperCAmelCase : int = []
UpperCAmelCase : int = 0
# If the last token is a CJK character, we print the characters.
elif len(__A ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
UpperCAmelCase : Union[str, Any] = text[self.print_len :]
self.print_len += len(__A )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
UpperCAmelCase : Optional[Any] = text[self.print_len : text.rfind(''' ''' ) + 1]
self.print_len += len(__A )
self.on_finalized_text(__A )
def __magic_name__ ( self : str ):
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
UpperCAmelCase : int = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
UpperCAmelCase : Dict = text[self.print_len :]
UpperCAmelCase : List[Any] = []
UpperCAmelCase : List[Any] = 0
else:
UpperCAmelCase : Dict = ''''''
UpperCAmelCase : str = True
self.on_finalized_text(__A, stream_end=__A )
def __magic_name__ ( self : List[str], __A : str, __A : bool = False ):
print(__A, flush=__A, end='''''' if not stream_end else None )
def __magic_name__ ( self : List[Any], __A : Optional[int] ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : Dict, __A : "AutoTokenizer", __A : bool = False, __A : Optional[float] = None, **__A : str ):
super().__init__(__A, __A, **__A )
UpperCAmelCase : Dict = Queue()
UpperCAmelCase : Any = None
UpperCAmelCase : Any = timeout
def __magic_name__ ( self : Dict, __A : str, __A : bool = False ):
self.text_queue.put(__A, timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal, timeout=self.timeout )
def __iter__( self : int ):
return self
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : List[Any] = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
| 336
| 0
|
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None, **_UpperCAmelCase ):
__UpperCAmelCase : Optional[Any] = [x.strip() for x in open(_UpperCAmelCase ).readlines()]
__UpperCAmelCase : List[Any] = [x.strip() for x in open(_UpperCAmelCase ).readlines()][: len(_UpperCAmelCase )]
__UpperCAmelCase : Union[str, Any] = calculate_rouge(_UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase )
if save_path is not None:
save_json(_UpperCAmelCase, _UpperCAmelCase, indent=_UpperCAmelCase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 37
|
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def __UpperCamelCase ( _UpperCAmelCase ):
return "".join(sorted(_UpperCAmelCase ) )
def __UpperCamelCase ( _UpperCAmelCase ):
return word_by_signature[signature(_UpperCAmelCase )]
lowerCAmelCase__ : str = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
lowerCAmelCase__ : Optional[int] = sorted({word.strip().lower() for word in data.splitlines()})
lowerCAmelCase__ : Tuple = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
lowerCAmelCase__ : int = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("anagrams.txt", "w") as file:
file.write("all_anagrams = \n ")
file.write(pprint.pformat(all_anagrams))
| 37
| 1
|
import string
import numpy
def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
"""simple docstring"""
return b if a == 0 else greatest_common_divisor(b % a , UpperCAmelCase_ )
class _snake_case :
SCREAMING_SNAKE_CASE__ = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
SCREAMING_SNAKE_CASE__ = numpy.vectorize(lambda _snake_case : x % 36 )
SCREAMING_SNAKE_CASE__ = numpy.vectorize(_snake_case )
def __init__( self , _lowerCamelCase ):
a :List[Any] = self.modulus(_lowerCamelCase ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
a :int = encrypt_key.shape[0]
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
return self.key_string.index(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
return self.key_string[round(_lowerCamelCase )]
def SCREAMING_SNAKE_CASE__ ( self ):
a :str = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
a :Any = det % len(self.key_string )
a :Dict = len(self.key_string )
if greatest_common_divisor(_lowerCamelCase , len(self.key_string ) ) != 1:
a :int = (
F'''determinant modular {req_l} of encryption key({det}) '''
F'''is not co prime w.r.t {req_l}.\nTry another key.'''
)
raise ValueError(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
a :Optional[Any] = [char for char in text.upper() if char in self.key_string]
a :List[str] = chars[-1]
while len(_lowerCamelCase ) % self.break_key != 0:
chars.append(_lowerCamelCase )
return "".join(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
a :Dict = self.process_text(text.upper() )
a :List[str] = ''''''
for i in range(0 , len(_lowerCamelCase ) - self.break_key + 1 , self.break_key ):
a :int = text[i : i + self.break_key]
a :Optional[int] = [self.replace_letters(_lowerCamelCase ) for char in batch]
a :Union[str, Any] = numpy.array([vec] ).T
a :str = self.modulus(self.encrypt_key.dot(_lowerCamelCase ) ).T.tolist()[
0
]
a :List[Any] = ''''''.join(
self.replace_digits(_lowerCamelCase ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[Any] = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
a :int = det % len(self.key_string )
a :Tuple = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
a :Tuple = i
break
a :List[str] = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
a :List[Any] = self.make_decrypt_key()
a :str = self.process_text(text.upper() )
a :List[Any] = ''''''
for i in range(0 , len(_lowerCamelCase ) - self.break_key + 1 , self.break_key ):
a :Optional[Any] = text[i : i + self.break_key]
a :List[Any] = [self.replace_letters(_lowerCamelCase ) for char in batch]
a :str = numpy.array([vec] ).T
a :Dict = self.modulus(decrypt_key.dot(_lowerCamelCase ) ).T.tolist()[0]
a :List[Any] = ''''''.join(
self.replace_digits(_lowerCamelCase ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def __lowerCamelCase ( ):
"""simple docstring"""
a :Tuple = int(input('''Enter the order of the encryption key: ''' ) )
a :Dict = []
print('''Enter each row of the encryption key with space separated integers''' )
for _ in range(UpperCAmelCase_ ):
a :List[str] = [int(UpperCAmelCase_ ) for x in input().split()]
hill_matrix.append(UpperCAmelCase_ )
a :Any = HillCipher(numpy.array(UpperCAmelCase_ ) )
print('''Would you like to encrypt or decrypt some text? (1 or 2)''' )
a :Any = input('''\n1. Encrypt\n2. Decrypt\n''' )
if option == "1":
a :str = input('''What text would you like to encrypt?: ''' )
print('''Your encrypted text is:''' )
print(hc.encrypt(UpperCAmelCase_ ) )
elif option == "2":
a :Dict = input('''What text would you like to decrypt?: ''' )
print('''Your decrypted text is:''' )
print(hc.decrypt(UpperCAmelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 94
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase__ = {
'facebook/nllb-large-en-ro': 10_24,
'facebook/nllb-200-distilled-600M': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = NllbTokenizer
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=False , **__lowerCamelCase , ) -> Tuple:
# Mask token behave like a normal word, i.e. include the space before it
_A : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token
_A : Optional[int] = legacy_behaviour
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , legacy_behaviour=__lowerCamelCase , **__lowerCamelCase , )
_A : int = vocab_file
_A : Optional[Any] = False if not self.vocab_file else True
_A : Tuple = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
_A : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_A : Optional[int] = src_lang if src_lang is not None else "eng_Latn"
_A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang)
_A : List[str] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def _lowerCamelCase ( self) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
_A : Tuple = [self.sep_token_id]
_A : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
_A : List[Any] = src_lang
_A : Optional[int] = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
_A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase)
_A : Tuple = tgt_lang_id
return inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "eng_Latn" , __lowerCamelCase = None , __lowerCamelCase = "fra_Latn" , **__lowerCamelCase , ) -> BatchEncoding:
_A : Tuple = src_lang
_A : int = tgt_lang
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self) -> str:
return self.set_src_lang_special_tokens(self.src_lang)
def _lowerCamelCase ( self) -> List[str]:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Dict = self.convert_tokens_to_ids(__lowerCamelCase)
if self.legacy_behaviour:
_A : List[str] = []
_A : Dict = [self.eos_token_id, self.cur_lang_code]
else:
_A : Tuple = [self.cur_lang_code]
_A : Optional[Any] = [self.eos_token_id]
_A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : int = self.convert_ids_to_tokens(self.suffix_tokens)
_A : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Optional[Any] = self.convert_tokens_to_ids(__lowerCamelCase)
if self.legacy_behaviour:
_A : Tuple = []
_A : Any = [self.eos_token_id, self.cur_lang_code]
else:
_A : Union[str, Any] = [self.cur_lang_code]
_A : str = [self.eos_token_id]
_A : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : Dict = self.convert_ids_to_tokens(self.suffix_tokens)
_A : Union[str, Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
return
_A : Dict = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase):
copyfile(self.vocab_file , __lowerCamelCase)
return (out_vocab_file,)
| 11
| 0
|
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : Union[str, Any] = ["onnx"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
requires_backends(self, ['onnx'] )
@classmethod
def snake_case_ ( cls, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
requires_backends(cls, ['onnx'] )
@classmethod
def snake_case_ ( cls, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
requires_backends(cls, ['onnx'] )
| 103
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__UpperCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
__UpperCAmelCase = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
__UpperCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase__ : Optional[str] = field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
UpperCAmelCase__ : Optional[str] = field(
default=a__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
UpperCAmelCase__ : Optional[str] = field(
default=a__ , metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."} , )
UpperCAmelCase__ : Optional[str] = field(default=a__ , metadata={"help": "A folder containing the training data."} )
UpperCAmelCase__ : Optional[str] = field(default=a__ , metadata={"help": "A folder containing the validation data."} )
UpperCAmelCase__ : Optional[float] = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
UpperCAmelCase__ : int = field(default=32 , metadata={"help": "The size of the square patches to use for masking."} )
UpperCAmelCase__ : float = field(
default=0.6 , metadata={"help": "Percentage of patches to mask."} , )
UpperCAmelCase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
UpperCAmelCase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : List[Any] = {}
if self.train_dir is not None:
UpperCamelCase : Any = self.train_dir
if self.validation_dir is not None:
UpperCamelCase : Union[str, Any] = self.validation_dir
UpperCamelCase : List[str] = data_files if data_files else None
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase__ : str = field(
default=a__ , metadata={
"help": (
"The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
"checkpoint identifier on the hub. "
"Don't set if you want to train a model from scratch."
)
} , )
UpperCAmelCase__ : Optional[str] = field(
default=a__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(a__ )} , )
UpperCAmelCase__ : Optional[str] = field(
default=a__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase__ : Optional[str] = field(
default=a__ , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
UpperCAmelCase__ : Optional[str] = field(
default=a__ , metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"} , )
UpperCAmelCase__ : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
UpperCAmelCase__ : str = field(default=a__ , metadata={"help": "Name or path of preprocessor config."} )
UpperCAmelCase__ : bool = field(
default=a__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
UpperCAmelCase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
)
} , )
UpperCAmelCase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
)
} , )
UpperCAmelCase__ : Optional[int] = field(
default=a__ , metadata={"help": "Stride to use for the encoder."} , )
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_=192, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=0.6 ) -> Optional[Any]:
UpperCamelCase : List[Any] = input_size
UpperCamelCase : Any = mask_patch_size
UpperCamelCase : Tuple = model_patch_size
UpperCamelCase : Optional[Any] = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError('Input size must be divisible by mask patch size' )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError('Mask patch size must be divisible by model patch size' )
UpperCamelCase : Tuple = self.input_size // self.mask_patch_size
UpperCamelCase : int = self.mask_patch_size // self.model_patch_size
UpperCamelCase : Union[str, Any] = self.rand_size**2
UpperCamelCase : str = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self ) -> str:
UpperCamelCase : Union[str, Any] = np.random.permutation(self.token_count )[: self.mask_count]
UpperCamelCase : Tuple = np.zeros(self.token_count, dtype=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = 1
UpperCamelCase : Union[str, Any] = mask.reshape((self.rand_size, self.rand_size) )
UpperCamelCase : str = mask.repeat(self.scale, axis=0 ).repeat(self.scale, axis=1 )
return torch.tensor(mask.flatten() )
def UpperCamelCase ( snake_case__ : int ) -> int:
UpperCamelCase : List[Any] = torch.stack([example['pixel_values'] for example in examples] )
UpperCamelCase : Optional[Any] = torch.stack([example['mask'] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def UpperCamelCase ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase , UpperCamelCase , UpperCamelCase : Union[str, Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mim' , snake_case__ , snake_case__ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase : Optional[int] = training_args.get_process_log_level()
logger.setLevel(snake_case__ )
transformers.utils.logging.set_verbosity(snake_case__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCamelCase : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
UpperCamelCase : Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCamelCase : Tuple = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , snake_case__ ) and data_args.train_val_split > 0.0:
UpperCamelCase : List[str] = ds['train'].train_test_split(data_args.train_val_split )
UpperCamelCase : str = split['train']
UpperCamelCase : Any = split['test']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase : Tuple = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
UpperCamelCase : Dict = AutoConfig.from_pretrained(model_args.config_name_or_path , **snake_case__ )
elif model_args.model_name_or_path:
UpperCamelCase : Optional[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **snake_case__ )
else:
UpperCamelCase : str = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(snake_case__ , 'decoder_type' ):
UpperCamelCase : Tuple = 'simmim'
# adapt config
UpperCamelCase : List[str] = model_args.image_size if model_args.image_size is not None else config.image_size
UpperCamelCase : str = model_args.patch_size if model_args.patch_size is not None else config.patch_size
UpperCamelCase : Dict = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
'image_size': model_args.image_size,
'patch_size': model_args.patch_size,
'encoder_stride': model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
UpperCamelCase : Optional[Any] = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case__ )
elif model_args.model_name_or_path:
UpperCamelCase : List[Any] = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case__ )
else:
UpperCamelCase : Optional[int] = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
UpperCamelCase : Dict = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
UpperCamelCase : Union[str, Any] = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
UpperCamelCase : Union[str, Any] = AutoModelForMaskedImageModeling.from_config(snake_case__ )
if training_args.do_train:
UpperCamelCase : Optional[int] = ds['train'].column_names
else:
UpperCamelCase : Optional[int] = ds['validation'].column_names
if data_args.image_column_name is not None:
UpperCamelCase : Dict = data_args.image_column_name
elif "image" in column_names:
UpperCamelCase : Union[str, Any] = 'image'
elif "img" in column_names:
UpperCamelCase : int = 'img'
else:
UpperCamelCase : Optional[int] = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
UpperCamelCase : Optional[int] = Compose(
[
Lambda(lambda snake_case__ : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
UpperCamelCase : Optional[int] = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(snake_case__ : List[Any] ):
UpperCamelCase : Any = [transforms(snake_case__ ) for image in examples[image_column_name]]
UpperCamelCase : Tuple = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
UpperCamelCase : Tuple = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(snake_case__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
UpperCamelCase : str = (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(snake_case__ )
# Initialize our trainer
UpperCamelCase : Any = Trainer(
model=snake_case__ , args=snake_case__ , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=snake_case__ , data_collator=snake_case__ , )
# Training
if training_args.do_train:
UpperCamelCase : Dict = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase : Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase : Tuple = last_checkpoint
UpperCamelCase : List[Any] = trainer.train(resume_from_checkpoint=snake_case__ )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase : List[str] = trainer.evaluate()
trainer.log_metrics('eval' , snake_case__ )
trainer.save_metrics('eval' , snake_case__ )
# Write model card and (optionally) push to hub
UpperCamelCase : List[str] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'masked-image-modeling',
'dataset': data_args.dataset_name,
'tags': ['masked-image-modeling'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case__ )
else:
trainer.create_model_card(**snake_case__ )
if __name__ == "__main__":
main()
| 103
| 1
|
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''spiece.model'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
lowerCAmelCase__ = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
lowerCAmelCase__ = 0
lowerCAmelCase__ = 1
lowerCAmelCase__ = 2
lowerCAmelCase__ = 3
lowerCAmelCase__ = 4
class lowercase_ (_snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Union[str, Any] = '''left'''
def __init__( self : Optional[int] ,lowercase__ : List[Any] ,lowercase__ : List[str]=False ,lowercase__ : Optional[int]=True ,lowercase__ : Optional[Any]=False ,lowercase__ : List[str]="<s>" ,lowercase__ : List[str]="</s>" ,lowercase__ : Union[str, Any]="<unk>" ,lowercase__ : Tuple="<sep>" ,lowercase__ : Union[str, Any]="<pad>" ,lowercase__ : Any="<cls>" ,lowercase__ : Dict="<mask>" ,lowercase__ : List[str]=["<eop>", "<eod>"] ,lowercase__ : Optional[Any] = None ,**lowercase__ : Tuple ,):
# Mask token behave like a normal word, i.e. include the space before it
__lowercase = AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else mask_token
__lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_UpperCamelCase ,remove_space=_UpperCamelCase ,keep_accents=_UpperCamelCase ,bos_token=_UpperCamelCase ,eos_token=_UpperCamelCase ,unk_token=_UpperCamelCase ,sep_token=_UpperCamelCase ,pad_token=_UpperCamelCase ,cls_token=_UpperCamelCase ,mask_token=_UpperCamelCase ,additional_special_tokens=_UpperCamelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_UpperCamelCase ,)
__lowercase = 3
__lowercase = do_lower_case
__lowercase = remove_space
__lowercase = keep_accents
__lowercase = vocab_file
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCamelCase )
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ):
return len(self.sp_model )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ):
__lowercase = self.__dict__.copy()
__lowercase = None
return state
def __setstate__( self : Optional[Any] ,lowercase__ : Dict ):
__lowercase = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
__lowercase = {}
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : str ):
if self.remove_space:
__lowercase = ' '.join(inputs.strip().split() )
else:
__lowercase = inputs
__lowercase = outputs.replace('''``''' ,'''"''' ).replace('''\'\'''' ,'''"''' )
if not self.keep_accents:
__lowercase = unicodedata.normalize('''NFKD''' ,_UpperCamelCase )
__lowercase = ''.join([c for c in outputs if not unicodedata.combining(_UpperCamelCase )] )
if self.do_lower_case:
__lowercase = outputs.lower()
return outputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Optional[int] ):
__lowercase = self.preprocess_text(_UpperCamelCase )
__lowercase = self.sp_model.encode(_UpperCamelCase ,out_type=_UpperCamelCase )
__lowercase = []
for piece in pieces:
if len(_UpperCamelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
__lowercase = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCamelCase ,'''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__lowercase = cur_pieces[1:]
else:
__lowercase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_UpperCamelCase )
else:
new_pieces.append(_UpperCamelCase )
return new_pieces
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : str ):
return self.sp_model.PieceToId(_UpperCamelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[str] ):
return self.sp_model.IdToPiece(_UpperCamelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : int ):
__lowercase = ''.join(_UpperCamelCase ).replace(_UpperCamelCase ,''' ''' ).strip()
return out_string
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : Any = False ,lowercase__ : int = None ,lowercase__ : Any = True ,**lowercase__ : List[Any] ,):
__lowercase = kwargs.pop('''use_source_tokenizer''' ,_UpperCamelCase )
__lowercase = self.convert_ids_to_tokens(_UpperCamelCase ,skip_special_tokens=_UpperCamelCase )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
__lowercase = []
__lowercase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) )
__lowercase = []
sub_texts.append(_UpperCamelCase )
else:
current_sub_text.append(_UpperCamelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
__lowercase = ''.join(_UpperCamelCase )
__lowercase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
__lowercase = self.clean_up_tokenization(_UpperCamelCase )
return clean_text
else:
return text
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Tuple ,lowercase__ : Dict = None ):
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Any ,lowercase__ : Any = None ,lowercase__ : Tuple = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase ,token_ids_a=_UpperCamelCase ,already_has_special_tokens=_UpperCamelCase )
if token_ids_a is not None:
return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1, 1]
return ([0] * len(_UpperCamelCase )) + [1, 1]
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Tuple ,lowercase__ : Optional[int] = None ):
__lowercase = [self.sep_token_id]
__lowercase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : List[Any] ,lowercase__ : str = None ):
if not os.path.isdir(_UpperCamelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__lowercase = os.path.join(
_UpperCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase ,'''wb''' ) as fi:
__lowercase = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,)
| 104
|
def lowercase__ ( __snake_case : list ):
'''simple docstring'''
for i in range(len(__snake_case ) - 1 , 0 , -1 ):
UpperCAmelCase_ : Dict = False
for j in range(__snake_case , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
UpperCAmelCase_ , UpperCAmelCase_ : Any = unsorted[j - 1], unsorted[j]
UpperCAmelCase_ : int = True
for j in range(__snake_case ):
if unsorted[j] > unsorted[j + 1]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = unsorted[j + 1], unsorted[j]
UpperCAmelCase_ : Any = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip()
__UpperCAmelCase = [int(item) for item in user_input.split(',')]
print(F'{cocktail_shaker_sort(unsorted) = }')
| 29
| 0
|
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_snake_case = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class _snake_case ( datasets.BuilderConfig ):
lowerCamelCase__: List[str] = None
def _UpperCamelCase ( snake_case__, snake_case__, ) -> str:
import pyspark
def generate_fn():
__UpperCAmelCase : int = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id" ) )
for partition_id in partition_order:
__UpperCAmelCase : List[Any] = df_with_partition_id.select("*" ).where(f'''part_id = {partition_id}''' ).drop("part_id" )
__UpperCAmelCase : Optional[int] = partition_df.collect()
__UpperCAmelCase : Any = 0
for row in rows:
yield f'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class _snake_case ( _BaseExamplesIterable ):
def __init__( self: List[Any] , __lowerCamelCase: "pyspark.sql.DataFrame" , __lowerCamelCase: int=None , ) -> List[str]:
__UpperCAmelCase : List[str] = df
__UpperCAmelCase : Optional[int] = partition_order or range(self.df.rdd.getNumPartitions() )
__UpperCAmelCase : List[str] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self: str ) -> Tuple:
yield from self.generate_examples_fn()
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: np.random.Generator ) -> int:
__UpperCAmelCase : Optional[Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(__snake_case )
return SparkExamplesIterable(self.df , partition_order=__snake_case )
def _lowerCamelCase ( self: int , __lowerCamelCase: int , __lowerCamelCase: int ) -> int:
__UpperCAmelCase : str = self.split_shard_indices_by_worker(__snake_case , __snake_case )
return SparkExamplesIterable(self.df , partition_order=__snake_case )
@property
def _lowerCamelCase ( self: int ) -> Optional[int]:
return len(self.partition_order )
class _snake_case ( datasets.DatasetBuilder ):
lowerCamelCase__: Union[str, Any] = SparkConfig
def __init__( self: Tuple , __lowerCamelCase: "pyspark.sql.DataFrame" , __lowerCamelCase: str = None , __lowerCamelCase: str = None , **__lowerCamelCase: Tuple , ) -> Optional[int]:
import pyspark
__UpperCAmelCase : Tuple = pyspark.sql.SparkSession.builder.getOrCreate()
__UpperCAmelCase : Union[str, Any] = df
__UpperCAmelCase : Optional[int] = working_dir
super().__init__(
cache_dir=__snake_case , config_name=str(self.df.semanticHash() ) , **__snake_case , )
def _lowerCamelCase ( self: Dict ) -> str:
# Returns the path of the created file.
def create_cache_and_write_probe(__lowerCamelCase: Tuple ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=__snake_case )
__UpperCAmelCase : Dict = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(__snake_case , "a" )
return [probe_file]
if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
__UpperCAmelCase : List[Any] = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(__snake_case ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
return datasets.DatasetInfo(features=self.config.features )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: datasets.download.download_manager.DownloadManager ) -> Any:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: List[str] ) -> Optional[Any]:
import pyspark
def get_arrow_batch_size(__lowerCamelCase: List[str] ):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
__UpperCAmelCase : List[Any] = self.df.count()
__UpperCAmelCase : Any = df_num_rows if df_num_rows <= 1_00 else 1_00
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
__UpperCAmelCase : str = (
self.df.limit(__snake_case )
.repartition(1 )
.mapInArrow(__snake_case , "batch_bytes: long" )
.agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
__UpperCAmelCase : Tuple = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
__UpperCAmelCase : Optional[int] = min(__snake_case , int(approx_total_size / max_shard_size ) )
__UpperCAmelCase : Dict = self.df.repartition(__snake_case )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: str , __lowerCamelCase: str , __lowerCamelCase: int , ) -> Dict:
import pyspark
__UpperCAmelCase : Optional[Any] = ParquetWriter if file_format == 'parquet' else ArrowWriter
__UpperCAmelCase : Any = os.path.join(self._working_dir , os.path.basename(__snake_case ) ) if self._working_dir else fpath
__UpperCAmelCase : Union[str, Any] = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
__UpperCAmelCase : Optional[Any] = self.config.features
__UpperCAmelCase : Optional[Any] = self._writer_batch_size
__UpperCAmelCase : List[str] = self._fs.storage_options
def write_arrow(__lowerCamelCase: str ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
__UpperCAmelCase : Any = pyspark.TaskContext().taskAttemptId()
__UpperCAmelCase : int = next(__snake_case , __snake_case )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
__UpperCAmelCase : Tuple = 0
__UpperCAmelCase : Optional[Any] = writer_class(
features=__snake_case , path=working_fpath.replace("SSSSS" , f'''{shard_id:05d}''' ).replace("TTTTT" , f'''{task_id:05d}''' ) , writer_batch_size=__snake_case , storage_options=__snake_case , embed_local_files=__snake_case , )
__UpperCAmelCase : int = pa.Table.from_batches([first_batch] )
writer.write_table(__snake_case )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
__UpperCAmelCase : int = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
shard_id += 1
__UpperCAmelCase : Dict = writer_class(
features=writer._features , path=working_fpath.replace("SSSSS" , f'''{shard_id:05d}''' ).replace("TTTTT" , f'''{task_id:05d}''' ) , writer_batch_size=__snake_case , storage_options=__snake_case , embed_local_files=__snake_case , )
__UpperCAmelCase : Dict = pa.Table.from_batches([batch] )
writer.write_table(__snake_case )
if writer._num_bytes > 0:
__UpperCAmelCase : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(__snake_case ) ):
__UpperCAmelCase : List[str] = os.path.join(os.path.dirname(__snake_case ) , os.path.basename(__snake_case ) )
shutil.move(__snake_case , __snake_case )
__UpperCAmelCase : Dict = (
self.df.mapInArrow(__snake_case , "task_id: long, num_examples: long, num_bytes: long" )
.groupBy("task_id" )
.agg(
pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _lowerCamelCase ( self: Dict , __lowerCamelCase: "datasets.SplitGenerator" , __lowerCamelCase: str = "arrow" , __lowerCamelCase: Optional[Union[str, int]] = None , __lowerCamelCase: Optional[int] = None , **__lowerCamelCase: Tuple , ) -> str:
self._validate_cache_dir()
__UpperCAmelCase : Optional[int] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(__snake_case )
__UpperCAmelCase : Union[str, Any] = not is_remote_filesystem(self._fs )
__UpperCAmelCase : List[Any] = os.path.join if is_local else posixpath.join
__UpperCAmelCase : Any = '-TTTTT-SSSSS-of-NNNNN'
__UpperCAmelCase : List[str] = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
__UpperCAmelCase : int = path_join(self._output_dir , __snake_case )
__UpperCAmelCase : List[str] = 0
__UpperCAmelCase : int = 0
__UpperCAmelCase : Any = 0
__UpperCAmelCase : Tuple = []
__UpperCAmelCase : Any = []
for task_id, content in self._prepare_split_single(__snake_case , __snake_case , __snake_case ):
(
__UpperCAmelCase
) : Any = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(__snake_case )
__UpperCAmelCase : Union[str, Any] = total_num_examples
__UpperCAmelCase : Dict = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
__UpperCAmelCase : str = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
__UpperCAmelCase : List[str] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
__lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: int , ):
rename(
__snake_case , fpath.replace("SSSSS" , f'''{shard_id:05d}''' ).replace("TTTTT" , f'''{task_id:05d}''' ) , fpath.replace("TTTTT-SSSSS" , f'''{global_shard_id:05d}''' ).replace("NNNNN" , f'''{total_shards:05d}''' ) , )
__UpperCAmelCase : Tuple = []
__UpperCAmelCase : str = 0
for i in range(len(__snake_case ) ):
__UpperCAmelCase : Optional[int] = task_id_and_num_shards[i]
for shard_id in range(__snake_case ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(__snake_case , len(__snake_case ) ).map(lambda __lowerCamelCase : _rename_shard(*__snake_case ) ).collect()
else:
# don't use any pattern
__UpperCAmelCase : Any = 0
__UpperCAmelCase : Any = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS" , f'''{shard_id:05d}''' ).replace("TTTTT" , f'''{task_id:05d}''' ) , fpath.replace(__snake_case , "" ) , )
def _lowerCamelCase ( self: str , __lowerCamelCase: "datasets.SplitGenerator" , ) -> Dict:
return SparkExamplesIterable(self.df )
| 358
|
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class _snake_case ( unittest.TestCase ):
def __init__( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict=13 , __lowerCamelCase: List[str]=7 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: int=True , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Tuple=99 , __lowerCamelCase: List[str]=32 , __lowerCamelCase: Optional[Any]=5 , __lowerCamelCase: List[str]=4 , __lowerCamelCase: str=37 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: int=0.1 , __lowerCamelCase: Optional[Any]=0.1 , __lowerCamelCase: Tuple=5_12 , __lowerCamelCase: int=16 , __lowerCamelCase: str=2 , __lowerCamelCase: Optional[Any]=0.02 , __lowerCamelCase: Optional[Any]=4 , ) -> str:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : Optional[int] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[str] = use_attention_mask
__UpperCAmelCase : Dict = use_token_type_ids
__UpperCAmelCase : Optional[int] = use_labels
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Dict = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : Union[str, Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : str = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = max_position_embeddings
__UpperCAmelCase : Optional[int] = type_vocab_size
__UpperCAmelCase : str = type_sequence_label_size
__UpperCAmelCase : Tuple = initializer_range
__UpperCAmelCase : str = num_choices
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : str = None
if self.use_attention_mask:
__UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Any = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__lowerCamelCase , )
return config, input_ids, attention_mask
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
__UpperCAmelCase : Any = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: str = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self: List[Any] ) -> Dict:
__UpperCAmelCase : Union[str, Any] = FlaxDistilBertModelTester(self )
@slow
def _lowerCamelCase ( self: Tuple ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
__UpperCAmelCase : Optional[int] = model_class_name.from_pretrained("distilbert-base-uncased" )
__UpperCAmelCase : Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(__lowerCamelCase )
@require_flax
class _snake_case ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self: int ) -> List[Any]:
__UpperCAmelCase : Dict = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" )
__UpperCAmelCase : Any = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__UpperCAmelCase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__UpperCAmelCase : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
__UpperCAmelCase : str = (1, 11, 7_68)
self.assertEqual(output.shape , __lowerCamelCase )
__UpperCAmelCase : Optional[int] = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1e-4 ) )
| 342
| 0
|
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
def _A ( self : List[str] ):
# A mock response for an HTTP head request to emulate server down
_UpperCAmelCase : Optional[Any] = mock.Mock()
_UpperCAmelCase : Optional[int] = 500
_UpperCAmelCase : Tuple = {}
_UpperCAmelCase : Optional[int] = HTTPError
_UpperCAmelCase : Any = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase : List[str] = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=A ) as mock_head:
_UpperCAmelCase : Union[str, Any] = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _A ( self : int ):
# A mock response for an HTTP head request to emulate server down
_UpperCAmelCase : List[Any] = mock.Mock()
_UpperCAmelCase : Union[str, Any] = 500
_UpperCAmelCase : Tuple = {}
_UpperCAmelCase : Union[str, Any] = HTTPError
_UpperCAmelCase : Optional[Any] = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase : Any = GPTaTokenizerFast.from_pretrained("gpt2" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=A ) as mock_head:
_UpperCAmelCase : Dict = GPTaTokenizerFast.from_pretrained("gpt2" )
# This check we did call the fake head request
mock_head.assert_called()
def _A ( self : str ):
# This test is for deprecated behavior and can be removed in v5
try:
_UpperCAmelCase : Any = tempfile.mktemp()
with open(A , "wb" ) as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" , A )
_UpperCAmelCase : Union[str, Any] = AlbertTokenizer.from_pretrained(A )
finally:
os.remove(A )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json" , "wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" , A )
_UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def _A ( self : Any ):
# This test is for deprecated behavior and can be removed in v5
_UpperCAmelCase : Optional[Any] = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase: Dict = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def _A ( cls : List[Any] ):
_UpperCAmelCase : Union[str, Any] = TOKEN
HfFolder.save_token(A )
@classmethod
def _A ( cls : int ):
try:
delete_repo(token=cls._token , repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def _A ( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase : Any = os.path.join(A , "vocab.txt" )
with open(A , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
_UpperCAmelCase : Optional[Any] = BertTokenizer(A )
tokenizer.push_to_hub("test-tokenizer" , use_auth_token=self._token )
_UpperCAmelCase : Optional[Any] = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A , repo_id="test-tokenizer" , push_to_hub=A , use_auth_token=self._token )
_UpperCAmelCase : Any = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def _A ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase : Optional[Any] = os.path.join(A , "vocab.txt" )
with open(A , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
_UpperCAmelCase : str = BertTokenizer(A )
tokenizer.push_to_hub("valid_org/test-tokenizer-org" , use_auth_token=self._token )
_UpperCAmelCase : int = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
A , repo_id="valid_org/test-tokenizer-org" , push_to_hub=A , use_auth_token=self._token )
_UpperCAmelCase : Optional[Any] = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def _A ( self : List[Any] ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase : Tuple = os.path.join(A , "vocab.txt" )
with open(A , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
_UpperCAmelCase : List[Any] = CustomTokenizer(A )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
_UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=A )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase : Union[str, Any] = os.path.join(A , "vocab.txt" )
with open(A , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
_UpperCAmelCase : Optional[Any] = BertTokenizerFast.from_pretrained(A )
bert_tokenizer.save_pretrained(A )
_UpperCAmelCase : List[str] = CustomTokenizerFast.from_pretrained(A )
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
_UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=A )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizerFast" )
_UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(
F"""{USER}/test-dynamic-tokenizer""" , use_fast=A , trust_remote_code=A )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
def _A ( self : Optional[Any] ):
_UpperCAmelCase : Optional[Any] = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def _A ( self : List[Any] ):
_UpperCAmelCase : Union[str, Any] = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS]", " This is a ", "extra_id_100"] )
def _A ( self : Union[str, Any] ):
_UpperCAmelCase : int = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) , ["A", "BC"] )
self.assertEqual(trie.split("BCA" ) , ["BC", "A"] )
def _A ( self : Union[str, Any] ):
_UpperCAmelCase : Tuple = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def _A ( self : Optional[Any] ):
_UpperCAmelCase : int = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def _A ( self : Optional[int] ):
_UpperCAmelCase : int = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) , ["AB", "C"] )
def _A ( self : List[Any] ):
_UpperCAmelCase : List[Any] = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) , ["ABC", "D"] )
def _A ( self : Optional[Any] ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
_UpperCAmelCase : List[str] = Trie()
_UpperCAmelCase : Optional[int] = trie.cut_text("ABC" , [0, 0, 2, 1, 2, 3] )
self.assertEqual(A , ["AB", "C"] )
| 31
|
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def UpperCamelCase_ ( _UpperCAmelCase : dict ) -> tuple:
"""simple docstring"""
return (data["data"], data["target"])
def UpperCamelCase_ ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : np.ndarray ) -> XGBClassifier:
"""simple docstring"""
_UpperCAmelCase : Any = XGBClassifier()
classifier.fit(_UpperCAmelCase , _UpperCAmelCase )
return classifier
def UpperCamelCase_ ( ) -> None:
"""simple docstring"""
_UpperCAmelCase : List[str] = load_iris()
_UpperCAmelCase , _UpperCAmelCase : Dict = data_handling(_UpperCAmelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = train_test_split(
_UpperCAmelCase , _UpperCAmelCase , test_size=0.2_5 )
_UpperCAmelCase : Optional[Any] = iris["target_names"]
# Create an XGBoost Classifier from the training data
_UpperCAmelCase : Tuple = xgboost(_UpperCAmelCase , _UpperCAmelCase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , display_labels=_UpperCAmelCase , cmap="Blues" , normalize="true" , )
plt.title("Normalized Confusion Matrix - IRIS Dataset" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 31
| 1
|
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : List[str] = tf.convert_to_tensor(
[
[
8.222_0991, # 3rd highest value; idx. 0
-0.562_0044,
5.2322_9752,
4.038_6393,
-6.879_8378,
-0.5478_5802,
-3.201_2153,
2.9277_7176,
1.8817_1953,
7.3534_1276, # 5th highest value; idx. 9
8.4320_7833, # 2nd highest value; idx. 10
-9.8571_1836,
-5.9620_9236,
-1.1303_9161,
-7.111_5294,
-0.836_9633,
-5.318_6408,
7.0642_7407,
0.8136_9344,
-0.8202_3817,
-5.917_9796,
0.5881_3443,
-6.9977_8438,
4.7155_1189,
-0.1877_1637,
7.4402_0759, # 4th highest value; idx. 25
9.3845_0987, # 1st highest value; idx. 26
2.1266_2941,
-9.3256_2038,
2.3565_2522,
], # cummulative prob of 5 highest values <= 0.6
[
0.5842_5518,
4.5313_9238,
-5.5751_0464,
-6.2803_0699,
-7.1952_9503,
-4.0212_2551,
1.3933_7037,
-6.0670_7057,
1.5948_0517,
-9.64_3119,
0.0390_7799,
0.6723_1762,
-8.8820_6726,
6.2711_5922, # 4th highest value; idx. 13
2.2852_0723,
4.8276_7506,
4.3042_1368,
8.827_5313, # 2nd highest value; idx. 17
5.4402_9958, # 5th highest value; idx. 18
-4.473_5794,
7.3857_9536, # 3rd highest value; idx. 20
-2.9105_1663,
2.6194_6077,
-2.567_4762,
-9.4895_9302,
-4.0292_2645,
-1.3541_6918,
9.6770_2323, # 1st highest value; idx. 27
-5.8947_8553,
1.8537_0467,
], # cummulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
UpperCAmelCase : int = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
UpperCAmelCase : Optional[Any] = tf.convert_to_tensor(
[8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.floataa , ) # expected non filtered values as noted above
UpperCAmelCase : Any = tf_top_k_top_p_filtering(_SCREAMING_SNAKE_CASE , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
UpperCAmelCase : Tuple = output[output != -float("""inf""" )]
UpperCAmelCase : Dict = tf.cast(
tf.where(tf.not_equal(_SCREAMING_SNAKE_CASE , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rtol=1E-12 )
tf.debugging.assert_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase , UpperCAmelCase__ ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
__lowerCAmelCase : Any = {
'AutoModelForCausalLM': TFAutoModelForCausalLM,
'AutoModelForSpeechSeq2Seq': TFAutoModelForSpeechSeqaSeq,
'AutoModelForSeq2SeqLM': TFAutoModelForSeqaSeqLM,
'AutoModelForVision2Seq': TFAutoModelForVisionaSeq,
'LogitsProcessorList': TFLogitsProcessorList,
'MinLengthLogitsProcessor': TFMinLengthLogitsProcessor,
'create_tensor_fn': tf.convert_to_tensor,
'floats_tensor': floats_tensor,
'return_tensors': 'tf',
}
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : int = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase : int = 2
UpperCAmelCase : Union[str, Any] = 2
class SCREAMING_SNAKE_CASE__ ( tf.Module ):
def __init__( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
super(_SCREAMING_SNAKE_CASE , self ).__init__()
UpperCAmelCase : Tuple = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=_SCREAMING_SNAKE_CASE , )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
UpperCAmelCase : Tuple = self.model.generate(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , max_new_tokens=_SCREAMING_SNAKE_CASE , return_dict_in_generate=_SCREAMING_SNAKE_CASE , )
return {"sequences": outputs["sequences"]}
UpperCAmelCase : Union[str, Any] = [[2, 0], [102, 103]]
UpperCAmelCase : Any = [[1, 0], [1, 1]]
UpperCAmelCase : Optional[int] = DummyModel(model=_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , signatures={"""serving_default""": dummy_model.serving} )
UpperCAmelCase : List[str] = tf.saved_model.load(_SCREAMING_SNAKE_CASE ).signatures["""serving_default"""]
for batch_size in range(1 , len(_SCREAMING_SNAKE_CASE ) + 1 ):
UpperCAmelCase : Union[str, Any] = {
"""input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
}
UpperCAmelCase : int = serving_func(**_SCREAMING_SNAKE_CASE )["""sequences"""]
UpperCAmelCase : Any = test_model.generate(**_SCREAMING_SNAKE_CASE , max_new_tokens=_SCREAMING_SNAKE_CASE )
tf.debugging.assert_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : List[Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase : Tuple = 1
UpperCAmelCase : Optional[int] = 2
class SCREAMING_SNAKE_CASE__ ( tf.Module ):
def __init__( self , _SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
super(_SCREAMING_SNAKE_CASE , self ).__init__()
UpperCAmelCase : List[str] = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=_SCREAMING_SNAKE_CASE , )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.model.generate(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , max_new_tokens=_SCREAMING_SNAKE_CASE , return_dict_in_generate=_SCREAMING_SNAKE_CASE , )
return {"sequences": outputs["sequences"]}
UpperCAmelCase : Optional[Any] = [[2], [102, 103]]
UpperCAmelCase : List[str] = [[1], [1, 1]]
UpperCAmelCase : str = DummyModel(model=_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , signatures={"""serving_default""": dummy_model.serving} )
UpperCAmelCase : Optional[int] = tf.saved_model.load(_SCREAMING_SNAKE_CASE ).signatures["""serving_default"""]
for input_row in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase : str = {
"""input_ids""": tf.constant([dummy_input_ids[input_row]] ),
"""attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
}
UpperCAmelCase : Union[str, Any] = serving_func(**_SCREAMING_SNAKE_CASE )["""sequences"""]
UpperCAmelCase : Any = test_model.generate(**_SCREAMING_SNAKE_CASE , max_new_tokens=_SCREAMING_SNAKE_CASE )
tf.debugging.assert_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
@require_tensorflow_text
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=_SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
def __init__( self ) -> str:
'''simple docstring'''
super().__init__()
UpperCAmelCase : Any = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_SCREAMING_SNAKE_CASE , """spiece.model""" ) , """rb""" ).read() )
UpperCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : List[str] = self.tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = text.pad_model_inputs(
_SCREAMING_SNAKE_CASE , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
UpperCAmelCase : Optional[int] = self.model.generate(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
return self.tokenizer.detokenize(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = CompleteSentenceTransformer()
UpperCAmelCase : Dict = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" )
UpperCAmelCase : str = complete_model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = tf.keras.Model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
keras_model.save(_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : List[Any] = {
"""do_sample""": True,
"""num_beams""": 1,
"""top_p""": 0.7,
"""top_k""": 10,
"""temperature""": 0.7,
}
UpperCAmelCase : List[Any] = 14
UpperCAmelCase : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase : Union[str, Any] = """Hello, my dog is cute and"""
UpperCAmelCase : Dict = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors="""tf""" )
UpperCAmelCase : int = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase : Dict = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
UpperCAmelCase : List[Any] = model.generate(**_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
self.assertTrue(expectation == len(generated_tokens[0] ) )
UpperCAmelCase : Dict = [638, 198]
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
UpperCAmelCase : Tuple = model.generate(**_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
UpperCAmelCase : List[Any] = """Hugging Face is a technology company based in New York and Paris."""
UpperCAmelCase : Tuple = bart_tokenizer(_SCREAMING_SNAKE_CASE , return_tensors="""tf""" ).input_ids
UpperCAmelCase : Optional[Any] = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
UpperCAmelCase : Optional[int] = bart_model.generate(_SCREAMING_SNAKE_CASE ).numpy()
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
return super().call(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
UpperCAmelCase : Optional[Any] = bart_model.generate(_SCREAMING_SNAKE_CASE , foo="""bar""" ).numpy()
self.assertTrue(np.array_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
class SCREAMING_SNAKE_CASE__ ( bart_model.model.encoder.__class__ ):
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
return super().call(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = FakeEncoder(bart_model.config , bart_model.model.shared )
UpperCAmelCase : Tuple = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
UpperCAmelCase : int = bart_model.generate(_SCREAMING_SNAKE_CASE ).numpy()
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_SCREAMING_SNAKE_CASE , foo="""bar""" )
| 358
|
"""simple docstring"""
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
A: List[Any] = get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( enum.Enum ):
__lowerCAmelCase : Dict = 'all_checks'
__lowerCAmelCase : int = 'basic_checks'
__lowerCAmelCase : Optional[Any] = 'no_checks'
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
pass
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
pass
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
pass
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
pass
def _snake_case ( UpperCamelCase : Optional[dict] , UpperCamelCase : dict , UpperCamelCase : int=None ):
if expected_checksums is None:
logger.info("""Unable to verify checksums.""" )
return
if len(set(UpperCamelCase ) - set(UpperCamelCase ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(UpperCamelCase ) - set(UpperCamelCase ) ) )
if len(set(UpperCamelCase ) - set(UpperCamelCase ) ) > 0:
raise UnexpectedDownloadedFile(str(set(UpperCamelCase ) - set(UpperCamelCase ) ) )
UpperCAmelCase : Tuple = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
UpperCAmelCase : Union[str, Any] = """ for """ + verification_name if verification_name is not None else """"""
if len(UpperCamelCase ) > 0:
raise NonMatchingChecksumError(
F"Checksums didn't match{for_verification_name}:\n"
F"{bad_urls}\n"
"""Set `verification_mode='no_checks'` to skip checksums verification and ignore this error""" )
logger.info("""All the checksums matched successfully""" + for_verification_name )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
pass
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
pass
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
pass
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
pass
def _snake_case ( UpperCamelCase : Optional[dict] , UpperCamelCase : dict ):
if expected_splits is None:
logger.info("""Unable to verify splits sizes.""" )
return
if len(set(UpperCamelCase ) - set(UpperCamelCase ) ) > 0:
raise ExpectedMoreSplits(str(set(UpperCamelCase ) - set(UpperCamelCase ) ) )
if len(set(UpperCamelCase ) - set(UpperCamelCase ) ) > 0:
raise UnexpectedSplits(str(set(UpperCamelCase ) - set(UpperCamelCase ) ) )
UpperCAmelCase : List[str] = [
{"""expected""": expected_splits[name], """recorded""": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(UpperCamelCase ) > 0:
raise NonMatchingSplitsSizesError(str(UpperCamelCase ) )
logger.info("""All the splits matched successfully.""" )
def _snake_case ( UpperCamelCase : str , UpperCamelCase : bool = True ):
if record_checksum:
UpperCAmelCase : Dict = shaaaa()
with open(UpperCamelCase , """rb""" ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , B"""""" ):
m.update(UpperCamelCase )
UpperCAmelCase : Any = m.hexdigest()
else:
UpperCAmelCase : Dict = None
return {"num_bytes": os.path.getsize(UpperCamelCase ), "checksum": checksum}
def _snake_case ( UpperCamelCase : Union[str, Any] ):
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| 76
| 0
|
'''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : float ) -> float:
'''simple docstring'''
return 10 - x * x
def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float ) -> float:
'''simple docstring'''
if equation(snake_case_ ) * equation(snake_case_ ) >= 0:
raise ValueError("Wrong space!" )
UpperCAmelCase_ = a
while (b - a) >= 0.01:
# Find middle point
UpperCAmelCase_ = (a + b) / 2
# Check if middle point is root
if equation(snake_case_ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(snake_case_ ) * equation(snake_case_ ) < 0:
UpperCAmelCase_ = c
else:
UpperCAmelCase_ = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 1
|
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class _A :
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
raise NotImplementedError()
def A__ ( self ):
"""simple docstring"""
raise NotImplementedError()
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = False , **__lowerCAmelCase ):
"""simple docstring"""
lowercase = tokenizer
lowercase = skip_prompt
lowercase = decode_kwargs
# variables used in the streaming process
lowercase = []
lowercase = 0
lowercase = True
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("""TextStreamer only supports batch size 1""" )
elif len(value.shape ) > 1:
lowercase = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
lowercase = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
lowercase = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("""\n""" ):
lowercase = text[self.print_len :]
lowercase = []
lowercase = 0
# If the last token is a CJK character, we print the characters.
elif len(__lowerCAmelCase ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
lowercase = text[self.print_len :]
self.print_len += len(__lowerCAmelCase )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
lowercase = text[self.print_len : text.rfind(""" """ ) + 1]
self.print_len += len(__lowerCAmelCase )
self.on_finalized_text(__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
if len(self.token_cache ) > 0:
lowercase = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
lowercase = text[self.print_len :]
lowercase = []
lowercase = 0
else:
lowercase = """"""
lowercase = True
self.on_finalized_text(__lowerCAmelCase , stream_end=__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = False ):
"""simple docstring"""
print(__lowerCAmelCase , flush=__lowerCAmelCase , end="""""" if not stream_end else None )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
if (
(cp >= 0X4_e00 and cp <= 0X9_fff)
or (cp >= 0X3_400 and cp <= 0X4_dbf) #
or (cp >= 0X20_000 and cp <= 0X2a_6df) #
or (cp >= 0X2a_700 and cp <= 0X2b_73f) #
or (cp >= 0X2b_740 and cp <= 0X2b_81f) #
or (cp >= 0X2b_820 and cp <= 0X2c_eaf) #
or (cp >= 0Xf_900 and cp <= 0Xf_aff)
or (cp >= 0X2f_800 and cp <= 0X2f_a1f) #
): #
return True
return False
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = False , __lowerCAmelCase = None , **__lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
lowercase = Queue()
lowercase = None
lowercase = timeout
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = False ):
"""simple docstring"""
self.text_queue.put(__lowerCAmelCase , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self ):
"""simple docstring"""
return self
def A__ ( self ):
"""simple docstring"""
lowercase = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
| 197
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=lowercase_ ):
lowerCamelCase : Dict = ["flax"]
def __init__( self : List[Any] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : str ) -> List[Any]:
requires_backends(self , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : Tuple , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
requires_backends(cls , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : Optional[Any] ) -> str:
requires_backends(cls , ['flax'] )
class UpperCAmelCase_ ( metaclass=lowercase_ ):
lowerCamelCase : Dict = ["flax"]
def __init__( self : int , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : str ) -> Dict:
requires_backends(self , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : int , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Dict ) -> Optional[Any]:
requires_backends(cls , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : Tuple , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Dict ) -> List[str]:
requires_backends(cls , ['flax'] )
class UpperCAmelCase_ ( metaclass=lowercase_ ):
lowerCamelCase : Optional[Any] = ["flax"]
def __init__( self : List[str] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Tuple ) -> str:
requires_backends(self , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : List[Any] , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : int ) -> Optional[int]:
requires_backends(cls , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : Dict , *UpperCAmelCase__ : str , **UpperCAmelCase__ : int ) -> List[str]:
requires_backends(cls , ['flax'] )
class UpperCAmelCase_ ( metaclass=lowercase_ ):
lowerCamelCase : Optional[Any] = ["flax"]
def __init__( self : Union[str, Any] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : Optional[int] ) -> Union[str, Any]:
requires_backends(self , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : int , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : List[Any] ) -> List[str]:
requires_backends(cls , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : List[Any] , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : List[str] ) -> Dict:
requires_backends(cls , ['flax'] )
class UpperCAmelCase_ ( metaclass=lowercase_ ):
lowerCamelCase : List[str] = ["flax"]
def __init__( self : Tuple , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Optional[Any] ) -> Optional[Any]:
requires_backends(self , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Dict ) -> Optional[Any]:
requires_backends(cls , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : Tuple , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Dict ) -> Optional[int]:
requires_backends(cls , ['flax'] )
class UpperCAmelCase_ ( metaclass=lowercase_ ):
lowerCamelCase : int = ["flax"]
def __init__( self : Optional[Any] , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[int] ) -> List[str]:
requires_backends(self , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : List[Any] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : int ) -> List[Any]:
requires_backends(cls , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : List[str] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : Tuple ) -> Any:
requires_backends(cls , ['flax'] )
class UpperCAmelCase_ ( metaclass=lowercase_ ):
lowerCamelCase : Optional[Any] = ["flax"]
def __init__( self : Optional[Any] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[int] ) -> Any:
requires_backends(self , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : int , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Optional[int] ) -> List[Any]:
requires_backends(cls , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : Any , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Union[str, Any] ) -> int:
requires_backends(cls , ['flax'] )
class UpperCAmelCase_ ( metaclass=lowercase_ ):
lowerCamelCase : Dict = ["flax"]
def __init__( self : int , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Tuple ) -> Optional[Any]:
requires_backends(self , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : Tuple , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Any ) -> List[Any]:
requires_backends(cls , ['flax'] )
class UpperCAmelCase_ ( metaclass=lowercase_ ):
lowerCamelCase : List[str] = ["flax"]
def __init__( self : List[Any] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : str ) -> Optional[Any]:
requires_backends(self , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : Dict , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Union[str, Any] ) -> Dict:
requires_backends(cls , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : Tuple , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : str ) -> Union[str, Any]:
requires_backends(cls , ['flax'] )
class UpperCAmelCase_ ( metaclass=lowercase_ ):
lowerCamelCase : int = ["flax"]
def __init__( self : Any , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Optional[Any] ) -> Dict:
requires_backends(self , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : Dict , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Tuple ) -> int:
requires_backends(cls , ['flax'] )
class UpperCAmelCase_ ( metaclass=lowercase_ ):
lowerCamelCase : Any = ["flax"]
def __init__( self : Any , *UpperCAmelCase__ : int , **UpperCAmelCase__ : List[Any] ) -> Optional[Any]:
requires_backends(self , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : Any , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Tuple ) -> Tuple:
requires_backends(cls , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : Tuple , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Optional[Any] ) -> str:
requires_backends(cls , ['flax'] )
class UpperCAmelCase_ ( metaclass=lowercase_ ):
lowerCamelCase : str = ["flax"]
def __init__( self : Optional[Any] , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Union[str, Any] ) -> List[str]:
requires_backends(self , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : Tuple , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Tuple ) -> Union[str, Any]:
requires_backends(cls , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : Tuple , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Union[str, Any] ) -> Union[str, Any]:
requires_backends(cls , ['flax'] )
class UpperCAmelCase_ ( metaclass=lowercase_ ):
lowerCamelCase : Tuple = ["flax"]
def __init__( self : Union[str, Any] , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Tuple ) -> Optional[Any]:
requires_backends(self , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : Tuple , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : List[Any] ) -> int:
requires_backends(cls , ['flax'] )
@classmethod
def __UpperCAmelCase ( cls : str , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Any ) -> Optional[Any]:
requires_backends(cls , ['flax'] )
| 353
|
'''simple docstring'''
def a_ ( ):
lowerCAmelCase = []
lowerCAmelCase = 1
while len(lowerCamelCase ) < 1e6:
constant.append(str(lowerCamelCase ) )
i += 1
lowerCAmelCase = ''.join(lowerCamelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution())
| 55
| 0
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : float):
return 0.0
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
SCREAMING_SNAKE_CASE_: str = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Dict = 5_12
SCREAMING_SNAKE_CASE_: Optional[Any] = [1] + [0] * (size - 1)
SCREAMING_SNAKE_CASE_: str = [filter_type.process(_UpperCAmelCase ) for item in inputs]
SCREAMING_SNAKE_CASE_: str = [0] * (samplerate - size) # zero-padding
outputs += filler
SCREAMING_SNAKE_CASE_: Optional[int] = np.abs(np.fft.fft(_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE_: int = 20 * np.logaa(_UpperCAmelCase )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
# Display within reasonable bounds
SCREAMING_SNAKE_CASE_: Union[str, Any] = get_bounds(_UpperCAmelCase , _UpperCAmelCase )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel("Gain (dB)" )
plt.plot(_UpperCAmelCase )
plt.show()
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] = 5_12
SCREAMING_SNAKE_CASE_: Tuple = [1] + [0] * (size - 1)
SCREAMING_SNAKE_CASE_: str = [filter_type.process(_UpperCAmelCase ) for item in inputs]
SCREAMING_SNAKE_CASE_: Any = [0] * (samplerate - size) # zero-padding
outputs += filler
SCREAMING_SNAKE_CASE_: Tuple = np.angle(np.fft.fft(_UpperCAmelCase ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel("Phase shift (Radians)" )
plt.plot(np.unwrap(_UpperCAmelCase , -2 * pi ) )
plt.show()
| 13
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_snake_case = logging.get_logger(__name__)
class _snake_case ( _lowercase ):
lowerCamelCase__: Tuple = ["input_features"]
def __init__( self: Tuple , __lowerCamelCase: Union[str, Any]=80 , __lowerCamelCase: Optional[Any]=1_60_00 , __lowerCamelCase: Any=1_60 , __lowerCamelCase: Optional[int]=30 , __lowerCamelCase: List[str]=4_00 , __lowerCamelCase: Tuple=0.0 , __lowerCamelCase: Union[str, Any]=False , **__lowerCamelCase: Dict , ) -> Any:
super().__init__(
feature_size=__lowerCamelCase , sampling_rate=__lowerCamelCase , padding_value=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : int = n_fft
__UpperCAmelCase : List[str] = hop_length
__UpperCAmelCase : Optional[Any] = chunk_length
__UpperCAmelCase : Union[str, Any] = chunk_length * sampling_rate
__UpperCAmelCase : Any = self.n_samples // hop_length
__UpperCAmelCase : Tuple = sampling_rate
__UpperCAmelCase : List[Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCamelCase , min_frequency=0.0 , max_frequency=80_00.0 , sampling_rate=__lowerCamelCase , norm="slaney" , mel_scale="slaney" , )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: np.array ) -> np.ndarray:
__UpperCAmelCase : List[Any] = spectrogram(
__lowerCamelCase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
__UpperCAmelCase : Union[str, Any] = log_spec[:, :-1]
__UpperCAmelCase : List[Any] = np.maximum(__lowerCamelCase , log_spec.max() - 8.0 )
__UpperCAmelCase : str = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _lowerCamelCase ( __lowerCamelCase: List[np.ndarray] , __lowerCamelCase: List[np.ndarray] , __lowerCamelCase: float = 0.0 ) -> List[np.ndarray]:
if attention_mask is not None:
__UpperCAmelCase : Tuple = np.array(__lowerCamelCase , np.intaa )
__UpperCAmelCase : Dict = []
for vector, length in zip(__lowerCamelCase , attention_mask.sum(-1 ) ):
__UpperCAmelCase : Union[str, Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
__UpperCAmelCase : Dict = padding_value
normed_input_values.append(__lowerCamelCase )
else:
__UpperCAmelCase : Optional[int] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self: Dict , __lowerCamelCase: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[Union[str, TensorType]] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: Optional[str] = "max_length" , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[bool] = None , **__lowerCamelCase: Dict , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
__UpperCAmelCase : List[Any] = isinstance(__lowerCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
__UpperCAmelCase : Optional[int] = is_batched_numpy or (
isinstance(__lowerCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__UpperCAmelCase : Any = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCamelCase , np.ndarray ):
__UpperCAmelCase : str = np.asarray(__lowerCamelCase , dtype=np.floataa )
elif isinstance(__lowerCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__UpperCAmelCase : List[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__UpperCAmelCase : Optional[Any] = [np.asarray([raw_speech] ).T]
__UpperCAmelCase : List[Any] = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
__UpperCAmelCase : List[str] = self.pad(
__lowerCamelCase , padding=__lowerCamelCase , max_length=max_length if max_length else self.n_samples , truncation=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
__UpperCAmelCase : List[Any] = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
__UpperCAmelCase : str = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
__UpperCAmelCase : Any = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
__UpperCAmelCase : Dict = [self._np_extract_fbank_features(__lowerCamelCase ) for waveform in input_features[0]]
if isinstance(input_features[0] , __lowerCamelCase ):
__UpperCAmelCase : str = [np.asarray(__lowerCamelCase , dtype=np.floataa ) for feature in input_features]
else:
__UpperCAmelCase : List[str] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
__UpperCAmelCase : int = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
__UpperCAmelCase : List[str] = padded_inputs.convert_to_tensors(__lowerCamelCase )
return padded_inputs
def _lowerCamelCase ( self: str ) -> Dict[str, Any]:
__UpperCAmelCase : Tuple = copy.deepcopy(self.__dict__ )
__UpperCAmelCase : Optional[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 157
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowerCamelCase : Any = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : int = [
"VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMAEForPreTraining",
"ViTMAELayer",
"ViTMAEModel",
"ViTMAEPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Union[str, Any] = [
"TFViTMAEForPreTraining",
"TFViTMAEModel",
"TFViTMAEPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
_lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 249
|
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = 0
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32' )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = Path(UpperCamelCase__ ) / 'preprocessor_config.json'
UpperCamelCase = Path(UpperCamelCase__ ) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(UpperCamelCase__ , 'w' ) , )
json.dump({'model_type': 'clip'} , open(UpperCamelCase__ , 'w' ) )
UpperCamelCase = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Any ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = Path(UpperCamelCase__ ) / 'preprocessor_config.json'
UpperCamelCase = Path(UpperCamelCase__ ) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(UpperCamelCase__ , 'w' ) , )
json.dump({'model_type': 'clip'} , open(UpperCamelCase__ , 'w' ) )
UpperCamelCase = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Tuple ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = CLIPConfig()
# Create a dummy config file with image_proceesor_type
UpperCamelCase = Path(UpperCamelCase__ ) / 'preprocessor_config.json'
UpperCamelCase = Path(UpperCamelCase__ ) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(UpperCamelCase__ , 'w' ) , )
json.dump({'model_type': 'clip'} , open(UpperCamelCase__ , 'w' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
UpperCamelCase = AutoImageProcessor.from_pretrained(UpperCamelCase__ ).to_dict()
config_dict.pop('image_processor_type' )
UpperCamelCase = CLIPImageProcessor(**UpperCamelCase__ )
# save in new folder
model_config.save_pretrained(UpperCamelCase__ )
config.save_pretrained(UpperCamelCase__ )
UpperCamelCase = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
# make sure private variable is not incorrectly saved
UpperCamelCase = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : List[str] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = Path(UpperCamelCase__ ) / 'preprocessor_config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(UpperCamelCase__ , 'w' ) , )
UpperCamelCase = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Any ):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCamelCase__ , 'clip-base is not a local folder and is not a valid model identifier' ):
UpperCamelCase = AutoImageProcessor.from_pretrained('clip-base' )
def A ( self : List[Any] ):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCamelCase__ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCamelCase = AutoImageProcessor.from_pretrained(UpperCamelCase__ , revision='aaaaaa' )
def A ( self : List[str] ):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCamelCase__ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
UpperCamelCase = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model' )
def A ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(UpperCamelCase__ ):
UpperCamelCase = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase__ ):
UpperCamelCase = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=UpperCamelCase__ )
UpperCamelCase = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCamelCase__ )
UpperCamelCase = AutoImageProcessor.from_pretrained(UpperCamelCase__ , trust_remote_code=UpperCamelCase__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , 'NewImageProcessor' )
def A ( self : Optional[Any] ):
"""simple docstring"""
try:
AutoConfig.register('custom' , UpperCamelCase__ )
AutoImageProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoImageProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = Path(UpperCamelCase__ ) / 'preprocessor_config.json'
UpperCamelCase = Path(UpperCamelCase__ ) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(UpperCamelCase__ , 'w' ) , )
json.dump({'model_type': 'clip'} , open(UpperCamelCase__ , 'w' ) )
UpperCamelCase = CustomImageProcessor.from_pretrained(UpperCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCamelCase__ )
UpperCamelCase = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def A ( self : Optional[int] ):
"""simple docstring"""
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = True
try:
AutoConfig.register('custom' , UpperCamelCase__ )
AutoImageProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# If remote code is not set, the default is to use local
UpperCamelCase = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
UpperCamelCase = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
UpperCamelCase = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(not hasattr(UpperCamelCase__ , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 249
| 1
|
import pprint
import requests
UpperCamelCase__ = 'https://zenquotes.io/api'
def lowerCAmelCase_ ( ) -> list:
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + "/today" ).json()
def lowerCAmelCase_ ( ) -> list:
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + "/random" ).json()
if __name__ == "__main__":
UpperCamelCase__ = random_quotes()
pprint.pprint(response)
| 65
|
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int:
snake_case_ = nn.functional.normalize(UpperCAmelCase )
snake_case_ = nn.functional.normalize(UpperCAmelCase )
return torch.mm(UpperCAmelCase , normalized_text_embeds.t() )
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = CLIPConfig
SCREAMING_SNAKE_CASE_ = ["CLIPEncoderLayer"]
def __init__( self, lowerCAmelCase__) -> Optional[int]:
super().__init__(lowerCAmelCase__)
snake_case_ = CLIPVisionModel(config.vision_config)
snake_case_ = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=lowerCAmelCase__)
snake_case_ = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=lowerCAmelCase__)
snake_case_ = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=lowerCAmelCase__)
snake_case_ = nn.Parameter(torch.ones(17), requires_grad=lowerCAmelCase__)
snake_case_ = nn.Parameter(torch.ones(3), requires_grad=lowerCAmelCase__)
@torch.no_grad()
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
snake_case_ = self.vision_model(lowerCAmelCase__)[1] # pooled_output
snake_case_ = self.visual_projection(lowerCAmelCase__)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case_ = cosine_distance(lowerCAmelCase__, self.special_care_embeds).cpu().float().numpy()
snake_case_ = cosine_distance(lowerCAmelCase__, self.concept_embeds).cpu().float().numpy()
snake_case_ = []
snake_case_ = image_embeds.shape[0]
for i in range(lowerCAmelCase__):
snake_case_ = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
snake_case_ = 0.0
for concept_idx in range(len(special_cos_dist[0])):
snake_case_ = special_cos_dist[i][concept_idx]
snake_case_ = self.special_care_embeds_weights[concept_idx].item()
snake_case_ = round(concept_cos - concept_threshold + adjustment, 3)
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]})
snake_case_ = 0.01
for concept_idx in range(len(cos_dist[0])):
snake_case_ = cos_dist[i][concept_idx]
snake_case_ = self.concept_embeds_weights[concept_idx].item()
snake_case_ = round(concept_cos - concept_threshold + adjustment, 3)
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(lowerCAmelCase__)
result.append(lowerCAmelCase__)
snake_case_ = [len(res['bad_concepts']) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Optional[int]:
snake_case_ = self.vision_model(lowerCAmelCase__)[1] # pooled_output
snake_case_ = self.visual_projection(lowerCAmelCase__)
snake_case_ = cosine_distance(lowerCAmelCase__, self.special_care_embeds)
snake_case_ = cosine_distance(lowerCAmelCase__, self.concept_embeds)
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
snake_case_ = 0.0
snake_case_ = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
snake_case_ = torch.any(special_scores > 0, dim=1)
snake_case_ = special_care * 0.01
snake_case_ = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
snake_case_ = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
snake_case_ = torch.any(concept_scores > 0, dim=1)
return images, has_nsfw_concepts
| 69
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "roberta-prelayernorm"
def __init__( self, lowerCAmelCase__=5_0265, lowerCAmelCase__=768, lowerCAmelCase__=12, lowerCAmelCase__=12, lowerCAmelCase__=3072, lowerCAmelCase__="gelu", lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=512, lowerCAmelCase__=2, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=1, lowerCAmelCase__=0, lowerCAmelCase__=2, lowerCAmelCase__="absolute", lowerCAmelCase__=True, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Dict:
super().__init__(pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__)
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = hidden_act
snake_case_ = intermediate_size
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = position_embedding_type
snake_case_ = use_cache
snake_case_ = classifier_dropout
class UpperCamelCase ( lowerCAmelCase__ ):
@property
def a_ ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case_ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case_ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
| 312
|
"""simple docstring"""
__UpperCamelCase = 256
# Modulus to hash a string
__UpperCamelCase = 100_0003
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> bool:
snake_case_ = len(UpperCAmelCase )
snake_case_ = len(UpperCAmelCase )
if p_len > t_len:
return False
snake_case_ = 0
snake_case_ = 0
snake_case_ = 1
# Calculating the hash of pattern and substring of text
for i in range(UpperCAmelCase ):
snake_case_ = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
snake_case_ = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
snake_case_ = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
snake_case_ = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def UpperCAmelCase ( ) -> None:
snake_case_ = 'abc1abc12'
snake_case_ = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
snake_case_ = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) and not rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 2)
snake_case_ = 'ABABX'
snake_case_ = 'ABABZABABYABABX'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 3)
snake_case_ = 'AAAB'
snake_case_ = 'ABAAAAAB'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 4)
snake_case_ = 'abcdabcy'
snake_case_ = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 5)
snake_case_ = 'Lü'
snake_case_ = 'Lüsai'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
snake_case_ = 'Lue'
assert not rabin_karp(UpperCAmelCase , UpperCAmelCase )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
| 312
| 1
|
def UpperCamelCase ( __lowercase : Dict ,__lowercase : int ):
'''simple docstring'''
return x if y == 0 else greatest_common_divisor(lowercase__ ,x % y )
def UpperCamelCase ( __lowercase : Union[str, Any] ,__lowercase : List[Any] ):
'''simple docstring'''
return (x * y) // greatest_common_divisor(lowercase__ ,lowercase__ )
def UpperCamelCase ( __lowercase : Optional[Any] = 20 ):
'''simple docstring'''
A_ : Optional[int] = 1
for i in range(1 ,n + 1 ):
A_ : str = lcm(lowercase__ ,lowercase__ )
return g
if __name__ == "__main__":
print(F"""{solution() = }""")
| 140
|
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def _lowerCamelCase( lowercase__=None , lowercase__=None ) -> Dict:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=lowercase__ )
@dataclass
class A :
UpperCamelCase_ : str =field(
metadata={'''help''': '''The csv file to plot.'''} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={
'''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.'''
} , )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , )
UpperCamelCase_ : Optional[List[str]] =list_field(
default=A_ , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} )
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
try:
int(lowercase__ )
return True
except ValueError:
return False
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
try:
float(lowercase__ )
return True
except ValueError:
return False
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= args
__lowercase= defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='' ) as csv_file:
__lowercase= csv.DictReader(lowerCAmelCase )
for row in reader:
__lowercase= row['model']
self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
if can_convert_to_int(row['result'] ):
# value is not None
__lowercase= int(row['result'] )
elif can_convert_to_float(row['result'] ):
# value is not None
__lowercase= float(row['result'] )
def _A (self ):
__lowercase, __lowercase= plt.subplots()
__lowercase= 'Time usage' if self.args.is_time else 'Memory usage'
__lowercase= title_str + ' for training' if self.args.is_train else title_str + ' for inference'
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('log' )
ax.set_yscale('log' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
__lowercase= sorted(set(self.result_dict[model_name]['bsz'] ) )
__lowercase= sorted(set(self.result_dict[model_name]['seq_len'] ) )
__lowercase= self.result_dict[model_name]['result']
((__lowercase), (__lowercase))= (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
__lowercase= (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
__lowercase= np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowerCAmelCase , )
else:
__lowercase= np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((__lowercase), (__lowercase))= (
('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
)
__lowercase= np.asarray(lowerCAmelCase , lowerCAmelCase )[: len(lowerCAmelCase )]
plt.scatter(
lowerCAmelCase , lowerCAmelCase , label=f'{label_model_name} - {inner_loop_label}: {inner_loop_value}' )
plt.plot(lowerCAmelCase , lowerCAmelCase , '--' )
title_str += f' {label_model_name} vs.'
__lowercase= title_str[:-4]
__lowercase= 'Time in s' if self.args.is_time else 'Memory in MB'
# plot
plt.title(lowerCAmelCase )
plt.xlabel(lowerCAmelCase )
plt.ylabel(lowerCAmelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def _lowerCamelCase( ) -> str:
'''simple docstring'''
__lowercase= HfArgumentParser(lowercase__ )
__lowercase= parser.parse_args_into_dataclasses()[0]
__lowercase= Plot(args=lowercase__ )
plot.plot()
if __name__ == "__main__":
main()
| 295
| 0
|
"""simple docstring"""
def __lowercase ( _a ):
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(_a ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 358
|
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowercase__ : int = logging.get_logger('''transformers.models.encodec''')
lowercase__ : Optional[int] = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
lowercase__ : Tuple = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
lowercase__ : List[str] = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
lowercase__ : List[Any] = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
lowercase__ : int = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
lowercase__ : int = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
lowercase__ : List[str] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
lowercase__ : int = []
lowercase__ : Dict = []
def __lowercase ( _a , _a , _a , _a , _a ):
for attribute in key.split('''.''' ):
snake_case_ : Optional[Any] = getattr(_a , _a )
if weight_type is not None:
snake_case_ : Union[str, Any] = getattr(_a , _a ).shape
else:
snake_case_ : int = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
snake_case_ : Dict = value
elif weight_type == "weight_g":
snake_case_ : List[Any] = value
elif weight_type == "weight_v":
snake_case_ : List[Any] = value
elif weight_type == "bias":
snake_case_ : Optional[Any] = value
elif weight_type == "running_mean":
snake_case_ : str = value
elif weight_type == "running_var":
snake_case_ : List[Any] = value
elif weight_type == "num_batches_tracked":
snake_case_ : Tuple = value
elif weight_type == "weight_ih_l0":
snake_case_ : Dict = value
elif weight_type == "weight_hh_l0":
snake_case_ : str = value
elif weight_type == "bias_ih_l0":
snake_case_ : str = value
elif weight_type == "bias_hh_l0":
snake_case_ : Dict = value
elif weight_type == "weight_ih_l1":
snake_case_ : Optional[int] = value
elif weight_type == "weight_hh_l1":
snake_case_ : Dict = value
elif weight_type == "bias_ih_l1":
snake_case_ : List[str] = value
elif weight_type == "bias_hh_l1":
snake_case_ : Optional[int] = value
else:
snake_case_ : Dict = value
logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." )
def __lowercase ( _a , _a ):
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
snake_case_, snake_case_ : Tuple = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def __lowercase ( _a , _a , _a ):
snake_case_ : str = []
if model_name == "encodec_24khz" or "encodec_32khz":
snake_case_ : Any = MAPPING_24K
elif model_name == "encodec_48khz":
snake_case_ : int = MAPPING_48K
else:
raise ValueError(f"Unsupported model: {model_name}" )
for name, value in orig_dict.items():
if should_ignore(_a , _a ):
logger.info(f"{name} was ignored" )
continue
snake_case_ : Optional[Any] = False
for key, mapped_key in MAPPING.items():
if "*" in key:
snake_case_, snake_case_ : List[Any] = key.split('''.*.''' )
if prefix in name and suffix in name:
snake_case_ : Any = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
continue
snake_case_ : str = True
if "*" in mapped_key:
snake_case_ : Optional[Any] = name.split(_a )[0].split('''.''' )[-2]
snake_case_ : str = mapped_key.replace('''*''' , _a )
if "weight_g" in name:
snake_case_ : int = '''weight_g'''
elif "weight_v" in name:
snake_case_ : List[str] = '''weight_v'''
elif "weight_ih_l0" in name:
snake_case_ : List[Any] = '''weight_ih_l0'''
elif "weight_hh_l0" in name:
snake_case_ : Tuple = '''weight_hh_l0'''
elif "bias_ih_l0" in name:
snake_case_ : Any = '''bias_ih_l0'''
elif "bias_hh_l0" in name:
snake_case_ : Dict = '''bias_hh_l0'''
elif "weight_ih_l1" in name:
snake_case_ : str = '''weight_ih_l1'''
elif "weight_hh_l1" in name:
snake_case_ : List[Any] = '''weight_hh_l1'''
elif "bias_ih_l1" in name:
snake_case_ : List[Any] = '''bias_ih_l1'''
elif "bias_hh_l1" in name:
snake_case_ : List[Any] = '''bias_hh_l1'''
elif "bias" in name:
snake_case_ : Optional[int] = '''bias'''
elif "weight" in name:
snake_case_ : str = '''weight'''
elif "running_mean" in name:
snake_case_ : Optional[int] = '''running_mean'''
elif "running_var" in name:
snake_case_ : int = '''running_var'''
elif "num_batches_tracked" in name:
snake_case_ : Optional[int] = '''num_batches_tracked'''
else:
snake_case_ : Optional[Any] = None
set_recursively(_a , _a , _a , _a , _a )
continue
if not is_used:
unused_weights.append(_a )
logger.warning(f"Unused weights: {unused_weights}" )
@torch.no_grad()
def __lowercase ( _a , _a , _a , _a=None , _a=None , ):
if config_path is not None:
snake_case_ : Optional[int] = EncodecConfig.from_pretrained(_a )
else:
snake_case_ : str = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
snake_case_ : Union[str, Any] = [8, 5, 4, 4]
snake_case_ : Optional[int] = [2.2]
snake_case_ : Any = 64
snake_case_ : Dict = 32_000
snake_case_ : int = 2_048
snake_case_ : int = False
snake_case_ : Optional[int] = False
snake_case_ : Optional[int] = False
elif model_name == "encodec_48khz":
snake_case_ : List[str] = [8, 5, 4, 2]
snake_case_ : List[Any] = [3.0, 6.0, 12.0, 24.0]
snake_case_ : Any = 48_000
snake_case_ : List[str] = 2
snake_case_ : int = False
snake_case_ : str = '''time_group_norm'''
snake_case_ : int = True
snake_case_ : List[str] = 1.0
snake_case_ : Tuple = 0.01
else:
raise ValueError(f"Unknown model name: {model_name}" )
snake_case_ : Any = EncodecModel(_a )
snake_case_ : str = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(_a )
snake_case_ : Optional[Any] = torch.load(_a )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
snake_case_ : Union[str, Any] = original_checkpoint['''best_state''']
recursively_load_weights(_a , _a , _a )
model.save_pretrained(_a )
if repo_id:
print('''Pushing to the hub...''' )
feature_extractor.push_to_hub(_a )
model.push_to_hub(_a )
if __name__ == "__main__":
lowercase__ : int = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
lowercase__ : Optional[Any] = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 155
| 0
|
from math import factorial
__UpperCamelCase : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def a_ ( _A ) -> int:
"""simple docstring"""
if not isinstance(_A , _A ):
raise TypeError('Parameter number must be int' )
if number < 0:
raise ValueError('Parameter number must be greater than or equal to 0' )
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(_A ) )
def a_ ( _A = 60 , _A = 1000000 ) -> int:
"""simple docstring"""
if not isinstance(_A , _A ) or not isinstance(_A , _A ):
raise TypeError('Parameters chain_length and number_limit must be int' )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
'Parameters chain_length and number_limit must be greater than 0' )
# the counter for the chains with the exact desired length
snake_case__ = 0
# the cached sizes of the previous chains
snake_case__ = {}
for start_chain_element in range(1 , _A ):
# The temporary set will contain the elements of the chain
snake_case__ = set()
snake_case__ = 0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater then the desired one.
snake_case__ = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(_A )
chain_set_length += 1
snake_case__ = digit_factorial_sum(_A )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
snake_case__ = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution()}''')
| 307
|
import random
from typing import Any
def a_ ( _A ) -> list[Any]:
"""simple docstring"""
for _ in range(len(_A ) ):
snake_case__ = random.randint(0 , len(_A ) - 1 )
snake_case__ = random.randint(0 , len(_A ) - 1 )
snake_case__ , snake_case__ = data[b], data[a]
return data
if __name__ == "__main__":
__UpperCamelCase : Dict = [0, 1, 2, 3, 4, 5, 6, 7]
__UpperCamelCase : Any = ["""python""", """says""", """hello""", """!"""]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 307
| 1
|
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
UpperCAmelCase : Optional[Any] = (
"""This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"""
)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
warnings.warn(__lowerCAmelCase , __lowerCAmelCase )
requires_backends(__lowerCAmelCase , """sklearn""" )
return (preds == labels).mean()
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
warnings.warn(__lowerCAmelCase , __lowerCAmelCase )
requires_backends(__lowerCAmelCase , """sklearn""" )
lowercase_ = simple_accuracy(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = fa_score(y_true=__lowerCAmelCase , y_pred=__lowerCAmelCase )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
warnings.warn(__lowerCAmelCase , __lowerCAmelCase )
requires_backends(__lowerCAmelCase , """sklearn""" )
lowercase_ = pearsonr(__lowerCAmelCase , __lowerCAmelCase )[0]
lowercase_ = spearmanr(__lowerCAmelCase , __lowerCAmelCase )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
warnings.warn(__lowerCAmelCase , __lowerCAmelCase )
requires_backends(__lowerCAmelCase , """sklearn""" )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ), F'''Predictions and labels have mismatched lengths {len(__lowerCAmelCase )} and {len(__lowerCAmelCase )}'''
if task_name == "cola":
return {"mcc": matthews_corrcoef(__lowerCAmelCase , __lowerCAmelCase )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(__lowerCAmelCase , __lowerCAmelCase )}
elif task_name == "mrpc":
return acc_and_fa(__lowerCAmelCase , __lowerCAmelCase )
elif task_name == "sts-b":
return pearson_and_spearman(__lowerCAmelCase , __lowerCAmelCase )
elif task_name == "qqp":
return acc_and_fa(__lowerCAmelCase , __lowerCAmelCase )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(__lowerCAmelCase , __lowerCAmelCase )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(__lowerCAmelCase , __lowerCAmelCase )}
elif task_name == "qnli":
return {"acc": simple_accuracy(__lowerCAmelCase , __lowerCAmelCase )}
elif task_name == "rte":
return {"acc": simple_accuracy(__lowerCAmelCase , __lowerCAmelCase )}
elif task_name == "wnli":
return {"acc": simple_accuracy(__lowerCAmelCase , __lowerCAmelCase )}
elif task_name == "hans":
return {"acc": simple_accuracy(__lowerCAmelCase , __lowerCAmelCase )}
else:
raise KeyError(__lowerCAmelCase )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
'''simple docstring'''
warnings.warn(__lowerCAmelCase , __lowerCAmelCase )
requires_backends(__lowerCAmelCase , """sklearn""" )
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError(F'''Predictions and labels have mismatched lengths {len(__lowerCAmelCase )} and {len(__lowerCAmelCase )}''' )
if task_name == "xnli":
return {"acc": simple_accuracy(__lowerCAmelCase , __lowerCAmelCase )}
else:
raise KeyError(__lowerCAmelCase )
| 359
|
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
UpperCAmelCase : Dict = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
UpperCAmelCase : Union[str, Any] = 10
UpperCAmelCase : Union[str, Any] = 256
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Optional[MinHash]:
'''simple docstring'''
if len(__lowerCAmelCase ) < MIN_NUM_TOKENS:
return None
lowercase_ = MinHash(num_perm=__lowerCAmelCase )
for token in set(__lowerCAmelCase ):
min_hash.update(token.encode() )
return min_hash
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Set[str]:
'''simple docstring'''
return {t for t in NON_ALPHA.split(__lowerCAmelCase ) if len(t.strip() ) > 0}
class SCREAMING_SNAKE_CASE__ :
def __init__( self : List[str] , *,
lowerCAmelCase_ : float = 0.85 , ):
"""simple docstring"""
lowercase_ = duplication_jaccard_threshold
lowercase_ = NUM_PERM
lowercase_ = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm)
lowercase_ = defaultdict(lowerCAmelCase_)
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : MinHash):
"""simple docstring"""
lowercase_ = self._index.query(lowerCAmelCase_)
if code_key in self._index.keys:
print(F'''Duplicate key {code_key}''')
return
self._index.insert(lowerCAmelCase_ , lowerCAmelCase_)
if len(lowerCAmelCase_) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(lowerCAmelCase_)
break
else:
self._duplicate_clusters[close_duplicates[0]].add(lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = []
for base, duplicates in self._duplicate_clusters.items():
lowercase_ = [base] + list(lowerCAmelCase_)
# reformat the cluster to be a list of dict
lowercase_ = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(lowerCAmelCase_)
return duplicate_clusters
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : List[str]):
"""simple docstring"""
lowercase_ = self.get_duplicate_clusters()
with open(lowerCAmelCase_ , """w""") as f:
json.dump(lowerCAmelCase_ , lowerCAmelCase_)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ = element
lowercase_ = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> List[str]:
'''simple docstring'''
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(__lowerCAmelCase , max_queue_size=1_00_00 ) , chunksize=1_00 , ):
if data is not None:
yield data
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = DuplicationIndex(duplication_jaccard_threshold=__lowerCAmelCase )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(__lowerCAmelCase ) ) , max_queue_size=1_00 ) ):
di.add(__lowerCAmelCase , __lowerCAmelCase )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
lowercase_ = get_tokens(__lowerCAmelCase )
lowercase_ = get_tokens(__lowerCAmelCase )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
UpperCAmelCase : Optional[Any] = None
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = []
for elementa in cluster:
lowercase_ = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
lowercase_ = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(__lowerCAmelCase , __lowerCAmelCase ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
lowercase_ = 1
extremes.append(__lowerCAmelCase )
return extremes
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
global _shared_dataset
lowercase_ = dataset
lowercase_ = []
lowercase_ = partial(_find_cluster_extremes_shared , jaccard_threshold=__lowerCAmelCase )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
__lowerCAmelCase , __lowerCAmelCase , ) , total=len(__lowerCAmelCase ) , ):
extremes_list.append(__lowerCAmelCase )
return extremes_list
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
'''simple docstring'''
lowercase_ = make_duplicate_clusters(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
lowercase_ = {}
lowercase_ = find_extremes(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for extremes in extremes_clusters:
for element in extremes:
lowercase_ = element
lowercase_ = duplicate_indices - set(extreme_dict.keys() )
lowercase_ = dataset.filter(lambda __lowerCAmelCase , __lowerCAmelCase : idx not in remove_indices , with_indices=__lowerCAmelCase )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
lowercase_ = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
lowercase_ = extreme_dict[element["""base_index"""]]["""copies"""]
print(F'''Original dataset size: {len(__lowerCAmelCase )}''' )
print(F'''Number of duplicate clusters: {len(__lowerCAmelCase )}''' )
print(F'''Files in duplicate cluster: {len(__lowerCAmelCase )}''' )
print(F'''Unique files in duplicate cluster: {len(__lowerCAmelCase )}''' )
print(F'''Filtered dataset size: {len(__lowerCAmelCase )}''' )
return ds_filter, duplicate_clusters
| 313
| 0
|
'''simple docstring'''
import numpy as np
def lowerCamelCase (_SCREAMING_SNAKE_CASE : np.array ):
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 27
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : List[Any] = "▁"
A_ : str = {"vocab_file": "sentencepiece.bpe.model"}
A_ : Union[str, Any] = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
A_ : List[str] = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class lowerCamelCase (A__ ):
lowerCamelCase__ : Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase__ : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : Tuple = ['input_ids', 'attention_mask']
def __init__( self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple="<s>" , __UpperCAmelCase : str="</s>" , __UpperCAmelCase : int="</s>" , __UpperCAmelCase : Tuple="<s>" , __UpperCAmelCase : Union[str, Any]="<unk>" , __UpperCAmelCase : List[str]="<pad>" , __UpperCAmelCase : Dict="<mask>" , __UpperCAmelCase : Optional[Dict[str, Any]] = None , **__UpperCAmelCase : Optional[int] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
SCREAMING_SNAKE_CASE__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
SCREAMING_SNAKE_CASE__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
SCREAMING_SNAKE_CASE__ = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = len(self.sp_model ) + self.fairseq_offset
SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = self.__dict__.copy()
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Tuple , __UpperCAmelCase : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self : List[Any] , __UpperCAmelCase : str ) -> List[str]:
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : List[str] ) -> Tuple:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE__ = self.sp_model.PieceToId(__UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self : List[Any] , __UpperCAmelCase : int ) -> Any:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = """""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip()
return out_string
def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE__ = os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
SCREAMING_SNAKE_CASE__ = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
| 165
| 0
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__UpperCamelCase : Dict = logging.get_logger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
UpperCamelCase__ = ['''pixel_values''']
def __init__( self :str , __magic_name__ :bool = True , __magic_name__ :Optional[Dict[str, int]] = None , __magic_name__ :PILImageResampling = PILImageResampling.BILINEAR , __magic_name__ :bool = True , __magic_name__ :Dict[str, int] = None , __magic_name__ :bool = True , __magic_name__ :Union[int, float] = 1 / 255 , __magic_name__ :bool = True , __magic_name__ :Optional[Union[float, List[float]]] = None , __magic_name__ :Optional[Union[float, List[float]]] = None , **__magic_name__ :Optional[int] , ):
'''simple docstring'''
super().__init__(**__magic_name__ )
a = size if size is not None else {"""shortest_edge""": 256}
a = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
a = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
a = get_size_dict(__magic_name__ , param_name="""crop_size""" )
a = do_resize
a = size
a = resample
a = do_center_crop
a = crop_size
a = do_rescale
a = rescale_factor
a = do_normalize
a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ ( self :int , __magic_name__ :np.ndarray , __magic_name__ :Dict[str, int] , __magic_name__ :PILImageResampling = PILImageResampling.BICUBIC , __magic_name__ :Optional[Union[str, ChannelDimension]] = None , **__magic_name__ :Optional[Any] , ):
'''simple docstring'''
a = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
a = get_resize_output_image_size(__magic_name__ , size=size["""shortest_edge"""] , default_to_square=__magic_name__ )
return resize(__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def lowerCamelCase__ ( self :List[Any] , __magic_name__ :np.ndarray , __magic_name__ :Dict[str, int] , __magic_name__ :Optional[Union[str, ChannelDimension]] = None , **__magic_name__ :Dict , ):
'''simple docstring'''
a = get_size_dict(__magic_name__ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(__magic_name__ , size=(size["""height"""], size["""width"""]) , data_format=__magic_name__ , **__magic_name__ )
def lowerCamelCase__ ( self :Tuple , __magic_name__ :np.ndarray , __magic_name__ :float , __magic_name__ :Optional[Union[str, ChannelDimension]] = None , **__magic_name__ :str ):
'''simple docstring'''
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :np.ndarray , __magic_name__ :Union[float, List[float]] , __magic_name__ :Union[float, List[float]] , __magic_name__ :Optional[Union[str, ChannelDimension]] = None , **__magic_name__ :Optional[Any] , ):
'''simple docstring'''
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def lowerCamelCase__ ( self :List[str] , __magic_name__ :ImageInput , __magic_name__ :Optional[bool] = None , __magic_name__ :Dict[str, int] = None , __magic_name__ :PILImageResampling = None , __magic_name__ :bool = None , __magic_name__ :Dict[str, int] = None , __magic_name__ :Optional[bool] = None , __magic_name__ :Optional[float] = None , __magic_name__ :Optional[bool] = None , __magic_name__ :Optional[Union[float, List[float]]] = None , __magic_name__ :Optional[Union[float, List[float]]] = None , __magic_name__ :Optional[Union[str, TensorType]] = None , __magic_name__ :Union[str, ChannelDimension] = ChannelDimension.FIRST , **__magic_name__ :Union[str, Any] , ):
'''simple docstring'''
a = do_resize if do_resize is not None else self.do_resize
a = size if size is not None else self.size
a = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
a = resample if resample is not None else self.resample
a = do_center_crop if do_center_crop is not None else self.do_center_crop
a = crop_size if crop_size is not None else self.crop_size
a = get_size_dict(__magic_name__ , param_name="""crop_size""" )
a = do_rescale if do_rescale is not None else self.do_rescale
a = rescale_factor if rescale_factor is not None else self.rescale_factor
a = do_normalize if do_normalize is not None else self.do_normalize
a = image_mean if image_mean is not None else self.image_mean
a = image_std if image_std is not None else self.image_std
a = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
a = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
a = [self.resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_center_crop:
a = [self.center_crop(image=__magic_name__ , size=__magic_name__ ) for image in images]
if do_rescale:
a = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_normalize:
a = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
a = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
a = {"""pixel_values""": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
def lowerCamelCase__ ( self :List[str] , __magic_name__ :Dict , __magic_name__ :List[Tuple] = None ):
'''simple docstring'''
a = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__magic_name__ ) != len(__magic_name__ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(__magic_name__ ):
a = target_sizes.numpy()
a = []
for idx in range(len(__magic_name__ ) ):
a = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=__magic_name__ )
a = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__magic_name__ )
else:
a = logits.argmax(dim=1 )
a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 347
|
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( __magic_name__ ):
UpperCamelCase__ = (IPNDMScheduler,)
UpperCamelCase__ = (('''num_inference_steps''', 50),)
def lowerCamelCase__ ( self :Any , **__magic_name__ :Optional[Any] ):
'''simple docstring'''
a = {"""num_train_timesteps""": 1000}
config.update(**__magic_name__ )
return config
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Tuple=0 , **__magic_name__ :Optional[int] ):
'''simple docstring'''
a = dict(self.forward_default_kwargs )
a = kwargs.pop("""num_inference_steps""" , __magic_name__ )
a = self.dummy_sample
a = 0.1 * sample
a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a = self.get_scheduler_config(**__magic_name__ )
a = scheduler_class(**__magic_name__ )
scheduler.set_timesteps(__magic_name__ )
# copy over dummy past residuals
a = dummy_past_residuals[:]
if time_step is None:
a = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__magic_name__ )
a = scheduler_class.from_pretrained(__magic_name__ )
new_scheduler.set_timesteps(__magic_name__ )
# copy over dummy past residuals
a = dummy_past_residuals[:]
a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :List[Any] , __magic_name__ :List[Any]=0 , **__magic_name__ :Any ):
'''simple docstring'''
a = dict(self.forward_default_kwargs )
a = kwargs.pop("""num_inference_steps""" , __magic_name__ )
a = self.dummy_sample
a = 0.1 * sample
a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a = self.get_scheduler_config()
a = scheduler_class(**__magic_name__ )
scheduler.set_timesteps(__magic_name__ )
# copy over dummy past residuals (must be after setting timesteps)
a = dummy_past_residuals[:]
if time_step is None:
a = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__magic_name__ )
a = scheduler_class.from_pretrained(__magic_name__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(__magic_name__ )
# copy over dummy past residual (must be after setting timesteps)
a = dummy_past_residuals[:]
a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCamelCase__ ( self :Optional[Any] , **__magic_name__ :Optional[int] ):
'''simple docstring'''
a = self.scheduler_classes[0]
a = self.get_scheduler_config(**__magic_name__ )
a = scheduler_class(**__magic_name__ )
a = 10
a = self.dummy_model()
a = self.dummy_sample_deter
scheduler.set_timesteps(__magic_name__ )
for i, t in enumerate(scheduler.timesteps ):
a = model(__magic_name__ , __magic_name__ )
a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
a = model(__magic_name__ , __magic_name__ )
a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample
return sample
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = dict(self.forward_default_kwargs )
a = kwargs.pop("""num_inference_steps""" , __magic_name__ )
for scheduler_class in self.scheduler_classes:
a = self.get_scheduler_config()
a = scheduler_class(**__magic_name__ )
a = self.dummy_sample
a = 0.1 * sample
if num_inference_steps is not None and hasattr(__magic_name__ , """set_timesteps""" ):
scheduler.set_timesteps(__magic_name__ )
elif num_inference_steps is not None and not hasattr(__magic_name__ , """set_timesteps""" ):
a = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
a = dummy_past_residuals[:]
a = scheduler.timesteps[5]
a = scheduler.timesteps[6]
a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=__magic_name__ , time_step=__magic_name__ )
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=__magic_name__ , time_step=__magic_name__ )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = self.full_loop()
a = torch.mean(torch.abs(__magic_name__ ) )
assert abs(result_mean.item() - 254_0529 ) < 10
| 347
| 1
|
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowercase = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
A = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir ,'schedulers/' ) )
A = self.diffusers_dir
shutil.copy(
os.path.join(A_ ,'src/diffusers/schedulers/scheduling_ddpm.py' ) ,os.path.join(self.diffusers_dir ,'schedulers/scheduling_ddpm.py' ) ,)
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
A = 'src/diffusers'
shutil.rmtree(self.diffusers_dir )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Any ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[str]=None ) -> List[str]:
A = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
A = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
A = black.Mode(target_versions={black.TargetVersion.PYaa} ,line_length=119 )
A = black.format_str(A_ ,mode=A_ )
A = os.path.join(self.diffusers_dir ,'new_code.py' )
with open(A_ ,'w' ,newline='\n' ) as f:
f.write(A_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(A_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name ,overwrite=A_ )
with open(A_ ,'r' ) as f:
self.assertTrue(f.read() ,A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
A = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
self.assertEqual(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
# Base copy consistency
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' ,'DDPMSchedulerOutput' ,REFERENCE_CODE + '\n' ,)
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' ,'DDPMSchedulerOutput' ,A_ ,)
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' ,'TestSchedulerOutput' ,re.sub('DDPM' ,'Test' ,A_ ) ,)
# Copy consistency with a really long name
A = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
F'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}' ,F'{long_class_name}SchedulerOutput' ,re.sub('Bert' ,A_ ,A_ ) ,)
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' ,'TestSchedulerOutput' ,A_ ,overwrite_result=re.sub('DDPM' ,'Test' ,A_ ) ,)
| 74
|
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
__lowerCAmelCase = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
__lowerCAmelCase = F"""{src_lang}-{tgt_lang}"""
__lowerCAmelCase = F"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
os.makedirs(lowerCamelCase, exist_ok=lowerCamelCase)
__lowerCAmelCase = os.path.join(lowerCamelCase, '''README.md''')
print(F"""Generating {path}""")
with open(lowerCamelCase, '''w''', encoding='''utf-8''') as f:
f.write(lowerCamelCase)
# make sure we are under the root of the project
_UpperCAmelCase : Dict = Path(__file__).resolve().parent.parent.parent
_UpperCAmelCase : Optional[int] = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Dict = model_name.split("""-""")
_UpperCAmelCase : Union[str, Any] = model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 174
| 0
|
"""simple docstring"""
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
a_ = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(42)
a_ = "sshleifer/student_marian_en_ro_6_1"
a_ = "sshleifer/tiny-mbart"
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE__):
def a_ ( self : str , a__ : int=False , a__ : Any=None , a__ : Any=True , a__ : Optional[int]=True , a__ : Union[str, Any]=True , a__ : Optional[Any]=True , ) -> Optional[int]:
'''simple docstring'''
_A = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=a_ , num_train_epochs=1 , distributed=a_ , extra_args_str=a_ , predict_with_generate=a_ , do_train=a_ , do_eval=a_ , do_predict=a_ , )
_A = TrainerState.load_from_json(os.path.join(a_ , "trainer_state.json" ) ).log_history
if not do_eval:
return
_A = [log for log in logs if "eval_loss" in log.keys()]
_A = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
_A = eval_metrics[-1]
assert isinstance(last_step_stats["eval_bleu"] , a_ )
assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def a_ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
self.run_seqaseq_quick()
@require_torch_multi_gpu
def a_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
self.run_seqaseq_quick(distributed=a_ )
@require_torch_multi_gpu
def a_ ( self : Dict ) -> Dict:
'''simple docstring'''
self.run_seqaseq_quick(distributed=a_ )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def a_ ( self : Tuple ) -> str:
'''simple docstring'''
self.run_seqaseq_quick(distributed=a_ , extra_args_str="--sharded_ddp simple" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def a_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
self.run_seqaseq_quick(distributed=a_ , extra_args_str="--sharded_ddp simple --fp16" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def a_ ( self : Tuple ) -> Any:
'''simple docstring'''
self.run_seqaseq_quick(distributed=a_ , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=a_ )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def a_ ( self : str ) -> Tuple:
'''simple docstring'''
self.run_seqaseq_quick(
distributed=a_ , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , predict_with_generate=a_ )
@require_apex
@require_torch_gpu
def a_ ( self : str ) -> str:
'''simple docstring'''
self.run_seqaseq_quick(distributed=a_ , extra_args_str="--fp16 --fp16_backend=apex" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=a_ , extra_args_str="--fp16 --fp16_backend=apex" )
@parameterized.expand(["base", "low", "high", "mixed"] )
@require_torch_multi_gpu
def a_ ( self : List[Any] , a__ : Any ) -> List[str]:
'''simple docstring'''
_A = {
# test with the default log_level - should be info and thus log info once
"base": {"extra_args_str": "", "n_matches": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
}
_A = experiments[experiment_id]
_A = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
_A = "Running training"
with CaptureStderr() as cl:
self.run_seqaseq_quick(**a_ , extra_args_str=data["extra_args_str"] )
_A = len(re.findall(a_ , cl.err ) )
self.assertEqual(a_ , data["n_matches"] )
@slow
def a_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
_A = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=a_ , learning_rate=3E-4 , num_train_epochs=10 , distributed=a_ , )
# Check metrics
_A = TrainerState.load_from_json(os.path.join(a_ , "trainer_state.json" ) ).log_history
_A = [log for log in logs if "eval_loss" in log.keys()]
_A = eval_metrics[0]
_A = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["eval_bleu"] , a_ )
# test if do_predict saves generations and metrics
_A = os.listdir(a_ )
_A = {os.path.basename(a_ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def a_ ( self : str ) -> Dict:
'''simple docstring'''
from transformers.training_args import OptimizerNames
def train_and_return_metrics(a__ : str ) -> Tuple[int, float]:
_A = "--skip_memory_metrics 0"
_A = self.run_trainer(
max_len=1_28 , model_name=a_ , learning_rate=3E-4 , num_train_epochs=1 , optim=a_ , distributed=a_ , extra_args_str=a_ , do_eval=a_ , do_predict=a_ , n_gpus_to_use=1 , )
# Check metrics
_A = TrainerState.load_from_json(Path(a_ , "trainer_state.json" ) ).log_history
_A = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 )
_A = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 )
_A = logs[0]["train_loss"]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
_A , _A , _A = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
_A , _A , _A = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
_A = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
_A = gpu_peak_mem_orig + gpu_alloc_mem_orig
_A = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
_A = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
_A = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
a_ , a_ , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , )
self.assertGreater(
a_ , a_ , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , )
self.assertEqual(
a_ , a_ , F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )
def a_ ( self : Union[str, Any] , a__ : int , a__ : str , a__ : int , a__ : float = 3E-3 , a__ : str = "adafactor" , a__ : bool = False , a__ : str = None , a__ : int = 0 , a__ : bool = True , a__ : bool = True , a__ : bool = True , a__ : bool = True , a__ : int = None , ) -> int:
'''simple docstring'''
_A = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
_A = self.get_auto_remove_tmp_dir()
_A = F"""\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(a_ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(a_ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n """.split()
_A = F"""\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(a_ )}\n """.split()
_A = "\n --do_predict\n ".split()
_A = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
_A = get_gpu_count()
_A = get_torch_dist_unique_port()
_A = F"""\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n """.split()
_A = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(a_ , env=self.get_env() )
else:
_A = ["run_translation.py"] + args
with patch.object(a_ , "argv" , a_ ):
main()
return output_dir
| 359
|
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
a_ = logging.get_logger(__name__)
# General docstring
a_ = "PoolFormerConfig"
# Base docstring
a_ = "sail/poolformer_s12"
a_ = [1, 5_12, 7, 7]
# Image classification docstring
a_ = "sail/poolformer_s12"
a_ = "tabby, tabby cat"
a_ = [
"sail/poolformer_s12",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def a__ ( __lowercase , __lowercase = 0.0 , __lowercase = False ) -> Dict:
if drop_prob == 0.0 or not training:
return input
_A = 1 - drop_prob
_A = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
_A = keep_prob + torch.rand(__lowercase , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
_A = input.div(__lowercase ) * random_tensor
return output
class snake_case ( nn.Module):
def __init__( self : Any , a__ : Optional[float] = None ) -> None:
'''simple docstring'''
super().__init__()
_A = drop_prob
def a_ ( self : Optional[Any] , a__ : torch.Tensor ) -> torch.Tensor:
'''simple docstring'''
return drop_path(a__ , self.drop_prob , self.training )
def a_ ( self : List[str] ) -> str:
'''simple docstring'''
return "p={}".format(self.drop_prob )
class snake_case ( nn.Module):
def __init__( self : Union[str, Any] , a__ : List[Any] , a__ : Any , a__ : List[Any] , a__ : Optional[int] , a__ : Dict , a__ : str=None ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
_A = patch_size if isinstance(a__ , collections.abc.Iterable ) else (patch_size, patch_size)
_A = stride if isinstance(a__ , collections.abc.Iterable ) else (stride, stride)
_A = padding if isinstance(a__ , collections.abc.Iterable ) else (padding, padding)
_A = nn.Convad(a__ , a__ , kernel_size=a__ , stride=a__ , padding=a__ )
_A = norm_layer(a__ ) if norm_layer else nn.Identity()
def a_ ( self : Dict , a__ : Any ) -> List[str]:
'''simple docstring'''
_A = self.projection(a__ )
_A = self.norm(a__ )
return embeddings
class snake_case ( nn.GroupNorm):
def __init__( self : Dict , a__ : Optional[int] , **a__ : Dict ) -> Optional[Any]:
'''simple docstring'''
super().__init__(1 , a__ , **a__ )
class snake_case ( nn.Module):
def __init__( self : int , a__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
_A = nn.AvgPoolad(a__ , stride=1 , padding=pool_size // 2 , count_include_pad=a__ )
def a_ ( self : List[str] , a__ : int ) -> str:
'''simple docstring'''
return self.pool(a__ ) - hidden_states
class snake_case ( nn.Module):
def __init__( self : Tuple , a__ : Optional[int] , a__ : Optional[Any] , a__ : List[str] , a__ : Optional[int] ) -> Any:
'''simple docstring'''
super().__init__()
_A = nn.Convad(a__ , a__ , 1 )
_A = nn.Convad(a__ , a__ , 1 )
_A = PoolFormerDropPath(a__ )
if isinstance(config.hidden_act , a__ ):
_A = ACTaFN[config.hidden_act]
else:
_A = config.hidden_act
def a_ ( self : List[Any] , a__ : int ) -> Dict:
'''simple docstring'''
_A = self.conva(a__ )
_A = self.act_fn(a__ )
_A = self.drop(a__ )
_A = self.conva(a__ )
_A = self.drop(a__ )
return hidden_states
class snake_case ( nn.Module):
def __init__( self : Union[str, Any] , a__ : str , a__ : List[str] , a__ : List[Any] , a__ : List[str] , a__ : Optional[Any] , a__ : Tuple ) -> Dict:
'''simple docstring'''
super().__init__()
_A = PoolFormerPooling(a__ )
_A = PoolFormerOutput(a__ , a__ , a__ , a__ )
_A = PoolFormerGroupNorm(a__ )
_A = PoolFormerGroupNorm(a__ )
# Useful for training neural nets
_A = PoolFormerDropPath(a__ ) if drop_path > 0.0 else nn.Identity()
_A = config.use_layer_scale
if config.use_layer_scale:
_A = nn.Parameter(
config.layer_scale_init_value * torch.ones((a__) ) , requires_grad=a__ )
_A = nn.Parameter(
config.layer_scale_init_value * torch.ones((a__) ) , requires_grad=a__ )
def a_ ( self : Union[str, Any] , a__ : Optional[int] ) -> Tuple:
'''simple docstring'''
if self.use_layer_scale:
_A = self.pooling(self.before_norm(a__ ) )
_A = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
_A = hidden_states + self.drop_path(a__ )
_A = ()
_A = self.output(self.after_norm(a__ ) )
_A = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
_A = hidden_states + self.drop_path(a__ )
_A = (output,) + outputs
return outputs
else:
_A = self.drop_path(self.pooling(self.before_norm(a__ ) ) )
# First residual connection
_A = pooling_output + hidden_states
_A = ()
# Second residual connection inside the PoolFormerOutput block
_A = self.drop_path(self.output(self.after_norm(a__ ) ) )
_A = hidden_states + layer_output
_A = (output,) + outputs
return outputs
class snake_case ( nn.Module):
def __init__( self : str , a__ : int ) -> Any:
'''simple docstring'''
super().__init__()
_A = config
# stochastic depth decay rule
_A = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
_A = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
_A = nn.ModuleList(a__ )
# Transformer blocks
_A = []
_A = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
_A = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
a__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(a__ ) )
_A = nn.ModuleList(a__ )
def a_ ( self : Tuple , a__ : Union[str, Any] , a__ : Tuple=False , a__ : List[str]=True ) -> List[Any]:
'''simple docstring'''
_A = () if output_hidden_states else None
_A = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
_A , _A = layers
# Get patch embeddings from hidden_states
_A = embedding_layer(a__ )
# Send the embeddings through the blocks
for _, blk in enumerate(a__ ):
_A = blk(a__ )
_A = layer_outputs[0]
if output_hidden_states:
_A = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=a__ , hidden_states=a__ )
class snake_case ( _UpperCamelCase):
__UpperCamelCase = PoolFormerConfig
__UpperCamelCase = 'poolformer'
__UpperCamelCase = 'pixel_values'
__UpperCamelCase = True
def a_ ( self : Tuple , a__ : Dict ) -> Any:
'''simple docstring'''
if isinstance(a__ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(a__ , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def a_ ( self : int , a__ : Dict , a__ : int=False ) -> str:
'''simple docstring'''
if isinstance(a__ , a__ ):
_A = value
a_ = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
a_ = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' , _UpperCamelCase , )
class snake_case ( _UpperCamelCase):
def __init__( self : int , a__ : Dict ) -> str:
'''simple docstring'''
super().__init__(a__ )
_A = config
_A = PoolFormerEncoder(a__ )
# Initialize weights and apply final processing
self.post_init()
def a_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(a__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=a__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a_ ( self : Tuple , a__ : Optional[torch.FloatTensor] = None , a__ : Optional[bool] = None , a__ : Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
'''simple docstring'''
_A = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_A = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values" )
_A = self.encoder(
a__ , output_hidden_states=a__ , return_dict=a__ , )
_A = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=a__ , hidden_states=encoder_outputs.hidden_states , )
class snake_case ( nn.Module):
def __init__( self : List[str] , a__ : Dict ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
_A = nn.Linear(config.hidden_size , config.hidden_size )
def a_ ( self : int , a__ : Tuple ) -> str:
'''simple docstring'''
_A = self.dense(a__ )
return output
@add_start_docstrings(
'\n PoolFormer Model transformer with an image classification head on top\n ' , _UpperCamelCase , )
class snake_case ( _UpperCamelCase):
def __init__( self : Tuple , a__ : str ) -> Optional[int]:
'''simple docstring'''
super().__init__(a__ )
_A = config.num_labels
_A = PoolFormerModel(a__ )
# Final norm
_A = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
_A = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=a__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a_ ( self : int , a__ : Optional[torch.FloatTensor] = None , a__ : Optional[torch.LongTensor] = None , a__ : Optional[bool] = None , a__ : Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
'''simple docstring'''
_A = return_dict if return_dict is not None else self.config.use_return_dict
_A = self.poolformer(
a__ , output_hidden_states=a__ , return_dict=a__ , )
_A = outputs[0]
_A = self.classifier(self.norm(a__ ).mean([-2, -1] ) )
_A = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_A = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_A = "single_label_classification"
else:
_A = "multi_label_classification"
if self.config.problem_type == "regression":
_A = MSELoss()
if self.num_labels == 1:
_A = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_A = loss_fct(a__ , a__ )
elif self.config.problem_type == "single_label_classification":
_A = CrossEntropyLoss()
_A = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_A = BCEWithLogitsLoss()
_A = loss_fct(a__ , a__ )
if not return_dict:
_A = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=a__ , logits=a__ , hidden_states=outputs.hidden_states )
| 163
| 0
|
"""simple docstring"""
import re
def lowercase (_lowerCAmelCase ):
if len(re.findall("""[ATCG]""" , _lowerCAmelCase ) ) != len(_lowerCAmelCase ):
raise ValueError("""Invalid Strand""" )
return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 301
|
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = [0] * no_of_processes
__lowerCAmelCase = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(_lowerCAmelCase ):
__lowerCAmelCase = burst_time[i]
__lowerCAmelCase = []
__lowerCAmelCase = 0
__lowerCAmelCase = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
__lowerCAmelCase = []
__lowerCAmelCase = -1
for i in range(_lowerCAmelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
__lowerCAmelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__lowerCAmelCase = i
total_time += burst_time[target_process]
completed += 1
__lowerCAmelCase = 0
__lowerCAmelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = [0] * no_of_processes
for i in range(_lowerCAmelCase ):
__lowerCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print('''[TEST CASE 01]''')
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = [2, 5, 3, 7]
SCREAMING_SNAKE_CASE_ = [0, 0, 0, 0]
SCREAMING_SNAKE_CASE_ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
SCREAMING_SNAKE_CASE_ = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
| 301
| 1
|
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowercase__ : Dict = """src/diffusers"""
lowercase__ : Optional[Any] = """."""
# This is to make sure the diffusers module imported is the one in the repo.
lowercase__ : List[Any] = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowercase__ : Union[str, Any] = spec.loader.load_module()
def UpperCamelCase_ ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict ) -> List[str]:
"""simple docstring"""
return line.startswith(lowerCAmelCase__ ) or len(lowerCAmelCase__ ) <= 1 or re.search(R'^\s*\)(\s*->.*:|:)\s*$' , lowerCAmelCase__ ) is not None
def UpperCamelCase_ ( lowerCAmelCase__ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase_ : Dict = object_name.split('.' )
lowerCAmelCase_ : Any = 0
# First let's find the module where our object lives.
lowerCAmelCase_ : Optional[Any] = parts[i]
while i < len(lowerCAmelCase__ ) and not os.path.isfile(os.path.join(lowerCAmelCase__ , f"{module}.py" ) ):
i += 1
if i < len(lowerCAmelCase__ ):
lowerCAmelCase_ : Tuple = os.path.join(lowerCAmelCase__ , parts[i] )
if i >= len(lowerCAmelCase__ ):
raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}." )
with open(os.path.join(lowerCAmelCase__ , f"{module}.py" ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowerCAmelCase_ : Optional[int] = f.readlines()
# Now let's find the class / func in the code!
lowerCAmelCase_ : Any = ''
lowerCAmelCase_ : List[Any] = 0
for name in parts[i + 1 :]:
while (
line_index < len(lowerCAmelCase__ ) and re.search(Rf"^{indent}(class|def)\s+{name}(\(|\:)" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(lowerCAmelCase__ ):
raise ValueError(f" {object_name} does not match any function or class in {module}." )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
lowerCAmelCase_ : Any = line_index
while line_index < len(lowerCAmelCase__ ) and _should_continue(lines[line_index] , lowerCAmelCase__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
lowerCAmelCase_ : Dict = lines[start_index:line_index]
return "".join(lowerCAmelCase__ )
lowercase__ : str = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
lowercase__ : Optional[int] = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""")
lowercase__ : Dict = re.compile(R"""<FILL\s+[^>]*>""")
def UpperCamelCase_ ( lowerCAmelCase__ : str ) -> List[str]:
"""simple docstring"""
lowerCAmelCase_ : List[Any] = code.split('\n' )
lowerCAmelCase_ : Union[str, Any] = 0
while idx < len(lowerCAmelCase__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(lowerCAmelCase__ ):
return re.search(R'^(\s*)\S' , lines[idx] ).groups()[0]
return ""
def UpperCamelCase_ ( lowerCAmelCase__ : Dict ) -> Dict:
"""simple docstring"""
lowerCAmelCase_ : List[Any] = len(get_indent(lowerCAmelCase__ ) ) > 0
if has_indent:
lowerCAmelCase_ : List[str] = f"class Bla:\n{code}"
lowerCAmelCase_ : Tuple = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = black.format_str(lowerCAmelCase__ , mode=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = style_docstrings_in_code(lowerCAmelCase__ )
return result[len('class Bla:\n' ) :] if has_indent else result
def UpperCamelCase_ ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any]=False ) -> str:
"""simple docstring"""
with open(lowerCAmelCase__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowerCAmelCase_ : Dict = f.readlines()
lowerCAmelCase_ : Any = []
lowerCAmelCase_ : str = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(lowerCAmelCase__ ):
lowerCAmelCase_ : int = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
lowerCAmelCase_ : List[str] = search.groups()
lowerCAmelCase_ : str = find_code_in_diffusers(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = get_indent(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = line_index + 1 if indent == theoretical_indent else line_index + 2
lowerCAmelCase_ : Dict = theoretical_indent
lowerCAmelCase_ : Dict = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
lowerCAmelCase_ : Tuple = True
while line_index < len(lowerCAmelCase__ ) and should_continue:
line_index += 1
if line_index >= len(lowerCAmelCase__ ):
break
lowerCAmelCase_ : Optional[int] = lines[line_index]
lowerCAmelCase_ : int = _should_continue(lowerCAmelCase__ , lowerCAmelCase__ ) and re.search(f"^{indent}# End copy" , lowerCAmelCase__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
lowerCAmelCase_ : str = lines[start_index:line_index]
lowerCAmelCase_ : str = ''.join(lowerCAmelCase__ )
# Remove any nested `Copied from` comments to avoid circular copies
lowerCAmelCase_ : Tuple = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(lowerCAmelCase__ ) is None]
lowerCAmelCase_ : Union[str, Any] = '\n'.join(lowerCAmelCase__ )
# Before comparing, use the `replace_pattern` on the original code.
if len(lowerCAmelCase__ ) > 0:
lowerCAmelCase_ : List[str] = replace_pattern.replace('with' , '' ).split(',' )
lowerCAmelCase_ : int = [_re_replace_pattern.search(lowerCAmelCase__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
lowerCAmelCase_ : Optional[int] = pattern.groups()
lowerCAmelCase_ : Union[str, Any] = re.sub(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if option.strip() == "all-casing":
lowerCAmelCase_ : int = re.sub(obja.lower() , obja.lower() , lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = re.sub(obja.upper() , obja.upper() , lowerCAmelCase__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
lowerCAmelCase_ : int = blackify(lines[start_index - 1] + theoretical_code )
lowerCAmelCase_ : List[Any] = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
lowerCAmelCase_ : str = lines[:start_index] + [theoretical_code] + lines[line_index:]
lowerCAmelCase_ : Optional[Any] = start_index + 1
if overwrite and len(lowerCAmelCase__ ) > 0:
# Warn the user a file has been modified.
print(f"Detected changes, rewriting {filename}." )
with open(lowerCAmelCase__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lowerCAmelCase__ )
return diffs
def UpperCamelCase_ ( lowerCAmelCase__ : bool = False ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase_ : List[str] = glob.glob(os.path.join(lowerCAmelCase__ , '**/*.py' ) , recursive=lowerCAmelCase__ )
lowerCAmelCase_ : str = []
for filename in all_files:
lowerCAmelCase_ : Union[str, Any] = is_copy_consistent(lowerCAmelCase__ , lowerCAmelCase__ )
diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
if not overwrite and len(lowerCAmelCase__ ) > 0:
lowerCAmelCase_ : Dict = '\n'.join(lowerCAmelCase__ )
raise Exception(
'Found the following copy inconsistencies:\n'
+ diff
+ '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' )
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowercase__ : Any = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 369
|
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict=1_3 , SCREAMING_SNAKE_CASE_ : List[Any]=7 , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : List[str]=9_9 , SCREAMING_SNAKE_CASE_ : int=1_6 , SCREAMING_SNAKE_CASE_ : List[str]=3_6 , SCREAMING_SNAKE_CASE_ : List[Any]=6 , SCREAMING_SNAKE_CASE_ : Tuple=6 , SCREAMING_SNAKE_CASE_ : List[Any]=6 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3_7 , SCREAMING_SNAKE_CASE_ : Tuple="gelu" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : int=0.1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE_ : List[str]=1_6 , SCREAMING_SNAKE_CASE_ : List[str]=2 , SCREAMING_SNAKE_CASE_ : List[Any]=0.02 , SCREAMING_SNAKE_CASE_ : Dict=3 , SCREAMING_SNAKE_CASE_ : int=4 , SCREAMING_SNAKE_CASE_ : Tuple=None , ):
lowerCAmelCase_ : Any = parent
lowerCAmelCase_ : Optional[int] = batch_size
lowerCAmelCase_ : Dict = seq_length
lowerCAmelCase_ : Tuple = is_training
lowerCAmelCase_ : str = use_input_mask
lowerCAmelCase_ : Union[str, Any] = use_token_type_ids
lowerCAmelCase_ : Tuple = use_labels
lowerCAmelCase_ : Optional[int] = vocab_size
lowerCAmelCase_ : Any = embedding_size
lowerCAmelCase_ : Optional[Any] = hidden_size
lowerCAmelCase_ : str = num_hidden_layers
lowerCAmelCase_ : Optional[Any] = num_hidden_groups
lowerCAmelCase_ : Dict = num_attention_heads
lowerCAmelCase_ : Optional[Any] = intermediate_size
lowerCAmelCase_ : Any = hidden_act
lowerCAmelCase_ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase_ : int = attention_probs_dropout_prob
lowerCAmelCase_ : int = max_position_embeddings
lowerCAmelCase_ : List[Any] = type_vocab_size
lowerCAmelCase_ : Any = type_sequence_label_size
lowerCAmelCase_ : Optional[int] = initializer_range
lowerCAmelCase_ : Tuple = num_labels
lowerCAmelCase_ : Dict = num_choices
lowerCAmelCase_ : Tuple = scope
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
lowerCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ : str = None
if self.use_input_mask:
lowerCAmelCase_ : int = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ : List[Any] = None
if self.use_token_type_ids:
lowerCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase_ : List[str] = None
lowerCAmelCase_ : Union[str, Any] = None
lowerCAmelCase_ : str = None
if self.use_labels:
lowerCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str ):
lowerCAmelCase_ : Union[str, Any] = AlbertModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : List[str] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple ):
lowerCAmelCase_ : Optional[Any] = AlbertForPreTraining(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : Optional[Any] = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , sentence_order_label=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict ):
lowerCAmelCase_ : str = AlbertForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : str = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] ):
lowerCAmelCase_ : List[str] = AlbertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : Any = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict ):
lowerCAmelCase_ : Union[str, Any] = self.num_labels
lowerCAmelCase_ : Union[str, Any] = AlbertForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ):
lowerCAmelCase_ : List[str] = self.num_labels
lowerCAmelCase_ : List[Any] = AlbertForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : str = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] ):
lowerCAmelCase_ : Optional[Any] = self.num_choices
lowerCAmelCase_ : int = AlbertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : List[Any] = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
lowerCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase_
) ,(
lowerCAmelCase_
) ,(
lowerCAmelCase_
) ,(
lowerCAmelCase_
) ,(
lowerCAmelCase_
) ,(
lowerCAmelCase_
) ,(
lowerCAmelCase_
) ,
) : Optional[int] = config_and_inputs
lowerCAmelCase_ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( lowercase_, lowercase_, unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": AlbertModel,
"""fill-mask""": AlbertForMaskedLM,
"""question-answering""": AlbertForQuestionAnswering,
"""text-classification""": AlbertForSequenceClassification,
"""token-classification""": AlbertForTokenClassification,
"""zero-shot""": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = True
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str=False ):
lowerCAmelCase_ : List[str] = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase_ : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : str = AlbertModelTester(self )
lowerCAmelCase_ : Optional[int] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : str ):
lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase_ : int = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Optional[Any] = AlbertModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
lowerCAmelCase_ : Any = AlbertModel.from_pretrained('albert-base-v2' )
lowerCAmelCase_ : Tuple = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowerCAmelCase_ : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase_ : List[Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0]
lowerCAmelCase_ : str = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = torch.tensor(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
| 289
| 0
|
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCAmelCase_ = (EulerDiscreteScheduler,)
lowerCAmelCase_ = 10
def lowercase (self , **UpperCAmelCase ) -> str:
_snake_case = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**UpperCAmelCase )
return config
def lowercase (self ) -> Optional[Any]:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def lowercase (self ) -> Tuple:
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def lowercase (self ) -> Optional[Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def lowercase (self ) -> str:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def lowercase (self ) -> Tuple:
_snake_case = self.scheduler_classes[0]
_snake_case = self.get_scheduler_config()
_snake_case = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
_snake_case = torch.manual_seed(0 )
_snake_case = self.dummy_model()
_snake_case = self.dummy_sample_deter * scheduler.init_noise_sigma
_snake_case = sample.to(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_snake_case = scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
_snake_case = model(UpperCAmelCase , UpperCAmelCase )
_snake_case = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase )
_snake_case = output.prev_sample
_snake_case = torch.sum(torch.abs(UpperCAmelCase ) )
_snake_case = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def lowercase (self ) -> Optional[int]:
_snake_case = self.scheduler_classes[0]
_snake_case = self.get_scheduler_config(prediction_type="""v_prediction""" )
_snake_case = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
_snake_case = torch.manual_seed(0 )
_snake_case = self.dummy_model()
_snake_case = self.dummy_sample_deter * scheduler.init_noise_sigma
_snake_case = sample.to(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_snake_case = scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
_snake_case = model(UpperCAmelCase , UpperCAmelCase )
_snake_case = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase )
_snake_case = output.prev_sample
_snake_case = torch.sum(torch.abs(UpperCAmelCase ) )
_snake_case = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 0.0002 ) < 1e-2
assert abs(result_mean.item() - 2.2_6_7_6e-0_6 ) < 1e-3
def lowercase (self ) -> int:
_snake_case = self.scheduler_classes[0]
_snake_case = self.get_scheduler_config()
_snake_case = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase )
_snake_case = torch.manual_seed(0 )
_snake_case = self.dummy_model()
_snake_case = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_snake_case = sample.to(UpperCAmelCase )
for t in scheduler.timesteps:
_snake_case = scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
_snake_case = model(UpperCAmelCase , UpperCAmelCase )
_snake_case = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase )
_snake_case = output.prev_sample
_snake_case = torch.sum(torch.abs(UpperCAmelCase ) )
_snake_case = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def lowercase (self ) -> int:
_snake_case = self.scheduler_classes[0]
_snake_case = self.get_scheduler_config()
_snake_case = scheduler_class(**UpperCAmelCase , use_karras_sigmas=UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase )
_snake_case = torch.manual_seed(0 )
_snake_case = self.dummy_model()
_snake_case = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_snake_case = sample.to(UpperCAmelCase )
for t in scheduler.timesteps:
_snake_case = scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
_snake_case = model(UpperCAmelCase , UpperCAmelCase )
_snake_case = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase )
_snake_case = output.prev_sample
_snake_case = torch.sum(torch.abs(UpperCAmelCase ) )
_snake_case = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1e-2
assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1e-3
| 341
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341
| 1
|
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = [0]
__SCREAMING_SNAKE_CASE = [0]
__SCREAMING_SNAKE_CASE = len(lowerCAmelCase__)
self.assertEqual(k.knapsack(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) , 0)
__SCREAMING_SNAKE_CASE = [6_0]
__SCREAMING_SNAKE_CASE = [1_0]
__SCREAMING_SNAKE_CASE = len(lowerCAmelCase__)
self.assertEqual(k.knapsack(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) , 0)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = [1, 2, 3]
__SCREAMING_SNAKE_CASE = [3, 2, 1]
__SCREAMING_SNAKE_CASE = len(lowerCAmelCase__)
self.assertEqual(k.knapsack(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) , 5)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = 5_0
__SCREAMING_SNAKE_CASE = [6_0, 1_0_0, 1_2_0]
__SCREAMING_SNAKE_CASE = [1_0, 2_0, 3_0]
__SCREAMING_SNAKE_CASE = len(lowerCAmelCase__)
self.assertEqual(k.knapsack(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) , 2_2_0)
if __name__ == "__main__":
unittest.main()
| 255
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=9_9 , lowerCAmelCase__=3_2 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=3_7 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1_2_8 , lowerCAmelCase__=3_2 , lowerCAmelCase__=1_6 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ):
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = num_choices
__SCREAMING_SNAKE_CASE = scope
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length])
__SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices)
__SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self):
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
def snake_case_ ( self):
(
(
__SCREAMING_SNAKE_CASE
) ,(
__SCREAMING_SNAKE_CASE
) ,(
__SCREAMING_SNAKE_CASE
) ,(
__SCREAMING_SNAKE_CASE
) ,(
__SCREAMING_SNAKE_CASE
) ,(
__SCREAMING_SNAKE_CASE
) ,(
__SCREAMING_SNAKE_CASE
) ,
) = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = NezhaModel(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ):
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = NezhaModel(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
__SCREAMING_SNAKE_CASE = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = NezhaForMaskedLM(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = NezhaForNextSentencePrediction(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
__SCREAMING_SNAKE_CASE = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = NezhaForPreTraining(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
__SCREAMING_SNAKE_CASE = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , next_sentence_label=lowerCAmelCase__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = NezhaForQuestionAnswering(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
__SCREAMING_SNAKE_CASE = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = NezhaForSequenceClassification(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = NezhaForTokenClassification(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = self.num_choices
__SCREAMING_SNAKE_CASE = NezhaForMultipleChoice(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
__SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__SCREAMING_SNAKE_CASE = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
__SCREAMING_SNAKE_CASE
) ,(
__SCREAMING_SNAKE_CASE
) ,(
__SCREAMING_SNAKE_CASE
) ,(
__SCREAMING_SNAKE_CASE
) ,(
__SCREAMING_SNAKE_CASE
) ,(
__SCREAMING_SNAKE_CASE
) ,(
__SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( __a , __a , __a , unittest.TestCase ):
"""simple docstring"""
__lowercase : int = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
__lowercase : Optional[Any] = (
{
'''feature-extraction''': NezhaModel,
'''fill-mask''': NezhaForMaskedLM,
'''question-answering''': NezhaForQuestionAnswering,
'''text-classification''': NezhaForSequenceClassification,
'''token-classification''': NezhaForTokenClassification,
'''zero-shot''': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowercase : List[Any] = True
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False):
__SCREAMING_SNAKE_CASE = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__)
if return_labels:
if model_class in get_values(lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__)
return inputs_dict
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = NezhaModelTester(self)
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=3_7)
def snake_case_ ( self):
self.config_tester.run_common_tests()
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCAmelCase__)
def snake_case_ ( self):
# This regression test was failing with PyTorch < 1.3
(
(
__SCREAMING_SNAKE_CASE
) ,(
__SCREAMING_SNAKE_CASE
) ,(
__SCREAMING_SNAKE_CASE
) ,(
__SCREAMING_SNAKE_CASE
) ,(
__SCREAMING_SNAKE_CASE
) ,(
__SCREAMING_SNAKE_CASE
) ,(
__SCREAMING_SNAKE_CASE
) ,(
__SCREAMING_SNAKE_CASE
) ,(
__SCREAMING_SNAKE_CASE
) ,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
__SCREAMING_SNAKE_CASE = None
self.model_tester.create_and_check_model_as_decoder(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__)
@slow
def snake_case_ ( self):
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = NezhaModel.from_pretrained(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
@slow
@require_torch_gpu
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(config=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = torch.jit.trace(
lowerCAmelCase__ , (inputs_dict["""input_ids"""].to("""cpu"""), inputs_dict["""attention_mask"""].to("""cpu""")))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , """bert.pt"""))
__SCREAMING_SNAKE_CASE = torch.jit.load(os.path.join(lowerCAmelCase__ , """bert.pt""") , map_location=lowerCAmelCase__)
loaded(inputs_dict["""input_ids"""].to(lowerCAmelCase__) , inputs_dict["""attention_mask"""].to(lowerCAmelCase__))
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""")
__SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]])
__SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1]])
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__)[0]
__SCREAMING_SNAKE_CASE = torch.Size((1, 6, 7_6_8))
self.assertEqual(output.shape , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = torch.tensor([[[0.06_85, 0.24_41, 0.11_02], [0.06_00, 0.19_06, 0.13_49], [0.02_21, 0.08_19, 0.05_86]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase__ , atol=1E-4))
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""")
__SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]])
__SCREAMING_SNAKE_CASE = torch.tensor([[1, 1, 1, 1, 1, 1]])
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__)[0]
__SCREAMING_SNAKE_CASE = torch.Size((1, 6, 2_1_1_2_8))
self.assertEqual(output.shape , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-2.79_39, -1.79_02, -2.21_89], [-2.85_85, -1.89_08, -2.37_23], [-2.64_99, -1.77_50, -2.25_58]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase__ , atol=1E-4))
| 255
| 1
|
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCamelCase : Optional[int] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def A ( snake_case :np.ndarray , snake_case :float , snake_case :int = 1_6_0_0_0 ) -> List[str]:
__UpperCamelCase = int(round(sample_rate * max_length ) )
if len(snake_case ) <= sample_length:
return wav
__UpperCamelCase = randint(0 , len(snake_case ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class __lowerCAmelCase :
lowercase = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Name of a dataset from the datasets package"} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "A file containing the training audio paths and labels."} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "A file containing the validation audio paths and labels."} )
lowercase = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
lowercase = field(
default="validation" , metadata={
"help": (
"The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
lowercase = field(
default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} , )
lowercase = field(
default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
lowercase = field(
default=20 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , )
@dataclass
class __lowerCAmelCase :
lowercase = field(
default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} )
lowercase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Name or path of preprocessor config."} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to freeze the feature encoder layers of the model."} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to generate an attention mask in the feature extractor."} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def UpperCAmelCase ( self ):
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'The argument `--freeze_feature_extractor` is deprecated and '
'will be removed in a future version. Use `--freeze_feature_encoder`'
'instead. Setting `freeze_feature_encoder==True`.' , __UpperCAmelCase , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'The argument `--freeze_feature_extractor` is deprecated and '
'should not be used in combination with `--freeze_feature_encoder`.'
'Only make use of `--freeze_feature_encoder`.' )
def A ( ) -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_audio_classification' , snake_case , snake_case )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__UpperCamelCase = training_args.get_process_log_level()
logger.setLevel(snake_case )
transformers.utils.logging.set_verbosity(snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
__UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to train from scratch.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset and prepare it for the audio classification task.
__UpperCamelCase = DatasetDict()
__UpperCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
__UpperCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
'Make sure to set `--audio_column_name` to the correct audio column - one of '
f'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
'Make sure to set `--label_column_name` to the correct text column - one of '
f'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
__UpperCamelCase = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
__UpperCamelCase = feature_extractor.model_input_names[0]
def train_transforms(snake_case :Tuple ):
__UpperCamelCase = []
for audio in batch[data_args.audio_column_name]:
__UpperCamelCase = random_subsample(
audio['array'] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(snake_case )
__UpperCamelCase = feature_extractor(snake_case , sampling_rate=feature_extractor.sampling_rate )
__UpperCamelCase = {model_input_name: inputs.get(snake_case )}
__UpperCamelCase = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(snake_case :Any ):
__UpperCamelCase = [audio['array'] for audio in batch[data_args.audio_column_name]]
__UpperCamelCase = feature_extractor(snake_case , sampling_rate=feature_extractor.sampling_rate )
__UpperCamelCase = {model_input_name: inputs.get(snake_case )}
__UpperCamelCase = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
__UpperCamelCase = raw_datasets['train'].features[data_args.label_column_name].names
__UpperCamelCase , __UpperCamelCase = {}, {}
for i, label in enumerate(snake_case ):
__UpperCamelCase = str(snake_case )
__UpperCamelCase = label
# Load the accuracy metric from the datasets package
__UpperCamelCase = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(snake_case :List[Any] ):
__UpperCamelCase = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=snake_case , references=eval_pred.label_ids )
__UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(snake_case ) , labelaid=snake_case , idalabel=snake_case , finetuning_task='audio-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__UpperCamelCase = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
__UpperCamelCase = (
raw_datasets['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(snake_case , output_all_columns=snake_case )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
__UpperCamelCase = (
raw_datasets['eval'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(snake_case , output_all_columns=snake_case )
# Initialize our trainer
__UpperCamelCase = Trainer(
model=snake_case , args=snake_case , train_dataset=raw_datasets['train'] if training_args.do_train else None , eval_dataset=raw_datasets['eval'] if training_args.do_eval else None , compute_metrics=snake_case , tokenizer=snake_case , )
# Training
if training_args.do_train:
__UpperCamelCase = None
if training_args.resume_from_checkpoint is not None:
__UpperCamelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__UpperCamelCase = last_checkpoint
__UpperCamelCase = trainer.train(resume_from_checkpoint=snake_case )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__UpperCamelCase = trainer.evaluate()
trainer.log_metrics('eval' , snake_case )
trainer.save_metrics('eval' , snake_case )
# Write model card and (optionally) push to hub
__UpperCamelCase = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'audio-classification',
'dataset': data_args.dataset_name,
'tags': ['audio-classification'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case )
else:
trainer.create_model_card(**snake_case )
if __name__ == "__main__":
main()
| 316
|
"""simple docstring"""
def A ( snake_case :int , snake_case :int ) -> bool:
return numa ^ numa < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowerCAmelCase : str = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=16 , snake_case__=13 , snake_case__=7 , snake_case__=14 , snake_case__=10 , snake_case__=19 , snake_case__=5 , snake_case__=4 , snake_case__=True , snake_case__=16 , snake_case__=2 , snake_case__=4 , snake_case__=4 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=[1, 2, 3, 4, 5] , snake_case__=25 , snake_case__=5 , ):
'''simple docstring'''
_lowerCAmelCase : str = d_model
_lowerCAmelCase : Optional[int] = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : Optional[int] = prediction_length
_lowerCAmelCase : str = context_length
_lowerCAmelCase : Dict = cardinality
_lowerCAmelCase : List[str] = num_time_features
_lowerCAmelCase : List[str] = lags_sequence
_lowerCAmelCase : int = embedding_dimension
_lowerCAmelCase : Any = is_training
_lowerCAmelCase : Optional[Any] = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : List[str] = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = context_length
_lowerCAmelCase : Optional[Any] = prediction_length + label_length
_lowerCAmelCase : List[Any] = label_length
_lowerCAmelCase : str = moving_average
_lowerCAmelCase : str = autocorrelation_factor
def a ( self ):
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = config.context_length + max(config.lags_sequence )
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_lowerCAmelCase : int = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, _past_length] )
_lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_lowerCAmelCase : List[str] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_lowerCAmelCase : Any = floats_tensor([self.batch_size, config.prediction_length] )
_lowerCAmelCase : Any = {
'past_values': past_values,
'static_categorical_features': static_categorical_features,
'past_time_features': past_time_features,
'past_observed_mask': past_observed_mask,
'future_time_features': future_time_features,
'future_values': future_values,
}
return inputs_dict
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_config()
_lowerCAmelCase : Optional[int] = self.prepare_autoformer_inputs_dict(snake_case__ )
return config, inputs_dict
def a ( self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def a ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Tuple = AutoformerModel(config=snake_case__ ).to(snake_case__ ).eval()
_lowerCAmelCase : int = model(**snake_case__ )
_lowerCAmelCase : List[str] = outputs.encoder_last_hidden_state
_lowerCAmelCase : Union[str, Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : int = model.get_encoder()
encoder.save_pretrained(snake_case__ )
_lowerCAmelCase : Optional[Any] = AutoformerEncoder.from_pretrained(snake_case__ ).to(snake_case__ )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = model.create_network_inputs(**snake_case__ )
_lowerCAmelCase , _lowerCAmelCase : Any = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_lowerCAmelCase : int = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_lowerCAmelCase : Tuple = encoder(inputs_embeds=snake_case__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
_lowerCAmelCase : List[Any] = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_lowerCAmelCase : Tuple = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_lowerCAmelCase : Any = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_lowerCAmelCase : Union[str, Any] = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : Optional[Any] = model.get_decoder()
decoder.save_pretrained(snake_case__ )
_lowerCAmelCase : Optional[int] = AutoformerDecoder.from_pretrained(snake_case__ ).to(snake_case__ )
_lowerCAmelCase : Union[str, Any] = decoder(
trend=snake_case__ , inputs_embeds=snake_case__ , encoder_hidden_states=snake_case__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
__magic_name__ = (AutoformerForPrediction,) if is_torch_available() else ()
__magic_name__ = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = AutoformerModelTester(self )
_lowerCAmelCase : Any = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def a ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_lowerCAmelCase : Any = model_class(snake_case__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case__ )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = model_class.from_pretrained(snake_case__ , output_loading_info=snake_case__ )
self.assertEqual(info['missing_keys'] , [] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case__ )
@unittest.skip(reason='Model has no tokens embeddings' )
def a ( self ):
'''simple docstring'''
pass
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = inspect.signature(getattr(snake_case__ , 'forward' ) )
# The main input is the name of the argument after `self`
_lowerCAmelCase : Optional[Any] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Tuple = model_class(snake_case__ )
_lowerCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : List[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : Any = [
'past_values',
'past_time_features',
'past_observed_mask',
'static_categorical_features',
'static_real_features',
'future_values',
'future_time_features',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('future_observed_mask' )
expected_arg_names.extend(
[
'decoder_attention_mask',
'head_mask',
'decoder_head_mask',
'cross_attn_head_mask',
'encoder_outputs',
'past_key_values',
'output_hidden_states',
'output_attentions',
'use_cache',
'return_dict',
] )
self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Union[str, Any] = True
_lowerCAmelCase : str = getattr(self.model_tester , 'seq_length' , snake_case__ )
_lowerCAmelCase : str = getattr(self.model_tester , 'decoder_seq_length' , snake_case__ )
_lowerCAmelCase : List[str] = getattr(self.model_tester , 'encoder_seq_length' , snake_case__ )
_lowerCAmelCase : Tuple = getattr(self.model_tester , 'd_model' , snake_case__ )
_lowerCAmelCase : List[str] = getattr(self.model_tester , 'num_attention_heads' , snake_case__ )
_lowerCAmelCase : Optional[int] = d_model // num_attention_heads
for model_class in self.all_model_classes:
_lowerCAmelCase : str = True
_lowerCAmelCase : Optional[Any] = False
_lowerCAmelCase : int = True
_lowerCAmelCase : Optional[int] = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase : str = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
_lowerCAmelCase : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : Any = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase : Optional[int] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
_lowerCAmelCase : Tuple = outputs.encoder_attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_lowerCAmelCase : Tuple = len(snake_case__ )
_lowerCAmelCase : List[str] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(snake_case__ , snake_case__ )
# decoder attentions
_lowerCAmelCase : int = outputs.decoder_attentions
self.assertIsInstance(snake_case__ , (list, tuple) )
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_lowerCAmelCase : Any = outputs.cross_attentions
self.assertIsInstance(snake_case__ , (list, tuple) )
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : Any = True
_lowerCAmelCase : Union[str, Any] = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase : Optional[int] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
self.assertEqual(out_len + 2 , len(snake_case__ ) )
_lowerCAmelCase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def a ( self ):
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def lowercase (_A="train-batch.pt" ):
"""simple docstring"""
_lowerCAmelCase : Any = hf_hub_download(repo_id='hf-internal-testing/tourism-monthly-batch' , filename=_A , repo_type='dataset' )
_lowerCAmelCase : Any = torch.load(_A , map_location=_A )
return batch
@require_torch
@slow
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AutoformerModel.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(snake_case__ )
_lowerCAmelCase : str = prepare_batch()
with torch.no_grad():
_lowerCAmelCase : List[str] = model(
past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , future_values=batch['future_values'] , future_time_features=batch['future_time_features'] , )[0]
_lowerCAmelCase : Tuple = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , snake_case__ )
_lowerCAmelCase : Optional[int] = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=snake_case__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case__ , atol=snake_case__ ) )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(snake_case__ )
_lowerCAmelCase : Optional[Any] = prepare_batch('val-batch.pt' )
with torch.no_grad():
_lowerCAmelCase : Any = model(
past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , ).encoder_last_hidden_state
_lowerCAmelCase : str = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , snake_case__ )
_lowerCAmelCase : Union[str, Any] = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=snake_case__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case__ , atol=snake_case__ ) )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(snake_case__ )
_lowerCAmelCase : str = prepare_batch('val-batch.pt' )
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model.generate(
static_categorical_features=batch['static_categorical_features'] , past_time_features=batch['past_time_features'] , past_values=batch['past_values'] , future_time_features=batch['future_time_features'] , past_observed_mask=batch['past_observed_mask'] , )
_lowerCAmelCase : Tuple = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , snake_case__ )
_lowerCAmelCase : Tuple = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=snake_case__ )
_lowerCAmelCase : List[Any] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case__ , rtol=1E-1 ) )
| 25
|
'''simple docstring'''
def lowercase (_A = 1_0_0_0_0_0_0 ):
"""simple docstring"""
_lowerCAmelCase : Any = set(range(3 , _A , 2 ) )
primes.add(2 )
for p in range(3 , _A , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , _A , _A ) ) )
_lowerCAmelCase : Union[str, Any] = [float(_A ) for n in range(limit + 1 )]
for p in primes:
for n in range(_A , limit + 1 , _A ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 25
| 1
|
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def _lowerCamelCase ( _UpperCamelCase = True , *_UpperCamelCase , **_UpperCamelCase ):
'''simple docstring'''
if not is_tqdm_available():
raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`." )
__lowerCAmelCase = False
if main_process_only:
__lowerCAmelCase = PartialState().local_process_index == 0
return _tqdm(*_UpperCamelCase , **_UpperCamelCase , disable=_UpperCamelCase )
| 57
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : List[Any] = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = """beit"""
def __init__( self : List[Any] , lowerCAmelCase_ : Tuple=8_1_9_2 , lowerCAmelCase_ : Optional[int]=7_6_8 , lowerCAmelCase_ : int=1_2 , lowerCAmelCase_ : Optional[int]=1_2 , lowerCAmelCase_ : Any=3_0_7_2 , lowerCAmelCase_ : Optional[int]="gelu" , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Any=0.02 , lowerCAmelCase_ : int=1e-12 , lowerCAmelCase_ : int=2_2_4 , lowerCAmelCase_ : str=1_6 , lowerCAmelCase_ : int=3 , lowerCAmelCase_ : Dict=False , lowerCAmelCase_ : int=False , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : int=False , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : Union[str, Any]=0.1 , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[Any]=[3, 5, 7, 1_1] , lowerCAmelCase_ : Optional[Any]=[1, 2, 3, 6] , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Dict=0.4 , lowerCAmelCase_ : Tuple=2_5_6 , lowerCAmelCase_ : Any=1 , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Optional[int]=2_5_5 , **lowerCAmelCase_ : Any , ) -> Dict:
super().__init__(**lowerCAmelCase_ )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = use_mask_token
__lowerCAmelCase = use_absolute_position_embeddings
__lowerCAmelCase = use_relative_position_bias
__lowerCAmelCase = use_shared_relative_position_bias
__lowerCAmelCase = layer_scale_init_value
__lowerCAmelCase = drop_path_rate
__lowerCAmelCase = use_mean_pooling
# decode head attributes (semantic segmentation)
__lowerCAmelCase = out_indices
__lowerCAmelCase = pool_scales
# auxiliary head attributes (semantic segmentation)
__lowerCAmelCase = use_auxiliary_head
__lowerCAmelCase = auxiliary_loss_weight
__lowerCAmelCase = auxiliary_channels
__lowerCAmelCase = auxiliary_num_convs
__lowerCAmelCase = auxiliary_concat_input
__lowerCAmelCase = semantic_loss_ignore_index
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = version.parse("""1.11""" )
@property
def lowercase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowercase ( self : Optional[Any] ) -> float:
return 1e-4
| 284
| 0
|
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class snake_case ( ctypes.Structure ):
# _fields is a specific attr expected by ctypes
__magic_name__ = [('''size''', ctypes.c_int), ('''visible''', ctypes.c_byte)]
def snake_case ():
'''simple docstring'''
if os.name == "nt":
a : Any = CursorInfo()
a : str = ctypes.windll.kernelaa.GetStdHandle(-1_1 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(__A , ctypes.byref(__A ) )
a : Union[str, Any] = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(__A , ctypes.byref(__A ) )
elif os.name == "posix":
sys.stdout.write('\033[?25l' )
sys.stdout.flush()
def snake_case ():
'''simple docstring'''
if os.name == "nt":
a : str = CursorInfo()
a : List[str] = ctypes.windll.kernelaa.GetStdHandle(-1_1 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(__A , ctypes.byref(__A ) )
a : Optional[Any] = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(__A , ctypes.byref(__A ) )
elif os.name == "posix":
sys.stdout.write('\033[?25h' )
sys.stdout.flush()
@contextmanager
def snake_case ():
'''simple docstring'''
try:
hide_cursor()
yield
finally:
show_cursor()
| 357
|
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
a : Optional[int] = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
a : int = AutoTokenizer.from_pretrained('xlm-roberta-base' )
a : int = 'The dog is cute and lives in the garden house'
a : List[Any] = jnp.array([tokenizer.encode(A )] )
a : int = (1, 1_2, 7_6_8) # batch_size, sequence_length, embedding_vector_dim
a : Dict = jnp.array(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
a : Any = model(A )['last_hidden_state']
self.assertEqual(output.shape , A )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , A , atol=1E-3 ) )
| 186
| 0
|
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def lowerCAmelCase_ ( __a ) -> Dict:
"""simple docstring"""
lowerCamelCase__: str =[
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(__a , __a )
def lowerCAmelCase_ ( __a ) -> List[str]:
"""simple docstring"""
lowerCamelCase__ , lowerCamelCase__: Tuple =emb.weight.shape
lowerCamelCase__: Tuple =nn.Linear(__a , __a , bias=__a )
lowerCamelCase__: int =emb.weight.data
return lin_layer
def lowerCAmelCase_ ( __a , __a=None ) -> Tuple:
"""simple docstring"""
lowerCamelCase__: Optional[Any] ={}
for old_key in state_dict.keys():
lowerCamelCase__: List[Any] =old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
lowerCamelCase__: int =key.replace("moe_layer.experts.0" , F"""ffn.experts.expert_{expert_idx}""" )
else:
lowerCamelCase__: Optional[Any] =key.replace("moe_layer.experts." , "ffn.experts.expert_" )
if "gate" in key:
lowerCamelCase__: int =key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
if "fc2" and "experts" not in key:
lowerCamelCase__: Tuple =key.replace(".fc2." , ".ffn.fc2." )
if "fc1" and "experts" not in key:
lowerCamelCase__: Tuple =key.replace(".fc1." , ".ffn.fc1." )
if ".encoder_attn." in key:
lowerCamelCase__: int =key.replace(".encoder_attn." , ".cross_attention." )
if "encoder_attn_layer_norm" in key:
lowerCamelCase__: Optional[int] =key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
if "final_layer_norm" in key:
lowerCamelCase__: List[str] =key.replace("final_layer_norm" , "ff_layer_norm" )
lowerCamelCase__: List[str] =state_dict[old_key]
return new_dict
def lowerCAmelCase_ ( __a , __a , __a , __a , __a = WEIGHTS_NAME ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: List[str] =[]
lowerCamelCase__: Any =0
os.makedirs(__a , exist_ok=__a )
for expert in range(__a ):
lowerCamelCase__: Any =switch_checkpoint_path + F"""-rank-{expert}.pt"""
if os.path.isfile(__a ):
lowerCamelCase__: Union[str, Any] =torch.load(__a )["model"]
remove_ignore_keys_(__a )
lowerCamelCase__: int =rename_fairseq_keys(__a , __a )
lowerCamelCase__: Any =os.path.join(
__a , weights_name.replace(".bin" , F"""-{len(__a )+1:05d}-of-???.bin""" ) )
torch.save(__a , __a )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__a )[0]].dtype )
# Add the last block
lowerCamelCase__: Optional[int] =os.path.join(__a , weights_name.replace(".bin" , F"""-{len(__a )+1:05d}-of-???.bin""" ) )
lowerCamelCase__: str =torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
remove_ignore_keys_(__a )
lowerCamelCase__: Any =rename_fairseq_keys(__a , __a )
lowerCamelCase__: Optional[Any] =shared_weights["decoder.embed_tokens.weight"]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__a ) == 1:
lowerCamelCase__: Optional[int] =os.path.join(__a , __a )
torch.save(__a , __a )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__a , __a )
# Otherwise, let's build the index
lowerCamelCase__: Dict ={}
for idx, shard in enumerate(__a ):
lowerCamelCase__: str =weights_name.replace(".bin" , F"""-{idx+1:05d}-of-{len(__a ):05d}.bin""" )
lowerCamelCase__: Optional[Any] =os.path.join(__a , weights_name.replace(".bin" , F"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(__a , os.path.join(__a , __a ) )
for key in shard:
lowerCamelCase__: List[str] =shard_file
# Add the metadata
lowerCamelCase__: List[str] ={"total_size": total_size}
lowerCamelCase__: List[Any] ={"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(__a , __a ) , "w" , encoding="utf-8" ) as f:
lowerCamelCase__: str =json.dumps(__a , indent=2 , sort_keys=__a ) + "\n"
f.write(__a )
return metadata, index
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__A = parser.parse_args()
__A , __A = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
__A = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
__A = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 10
|
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(__magic_name__ )
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , **A ) -> List[str]:
super().__init__(**A )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , A , **A ) -> Optional[Any]:
return super().__call__(A , **A )
def _lowercase( self , **A ) -> Optional[Any]:
UpperCAmelCase : List[Any] = {}
if "candidate_labels" in kwargs:
UpperCAmelCase : Dict = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
UpperCAmelCase : Optional[Any] = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def _lowercase( self , A , A=None , A="This is a photo of {}." ) -> Optional[Any]:
UpperCAmelCase : int = load_image(A )
UpperCAmelCase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
UpperCAmelCase : List[str] = candidate_labels
UpperCAmelCase : Tuple = [hypothesis_template.format(A ) for x in candidate_labels]
UpperCAmelCase : Union[str, Any] = self.tokenizer(A , return_tensors=self.framework , padding=A )
UpperCAmelCase : Union[str, Any] = [text_inputs]
return inputs
def _lowercase( self , A ) -> Optional[int]:
UpperCAmelCase : List[Any] = model_inputs.pop("""candidate_labels""" )
UpperCAmelCase : Optional[Any] = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , A ):
UpperCAmelCase : Optional[Any] = text_inputs[0]
else:
# Batching case.
UpperCAmelCase : Any = text_inputs[0][0]
UpperCAmelCase : Dict = self.model(**A , **A )
UpperCAmelCase : List[Any] = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
def _lowercase( self , A ) -> Union[str, Any]:
UpperCAmelCase : int = model_outputs.pop("""candidate_labels""" )
UpperCAmelCase : int = model_outputs["""logits"""][0]
if self.framework == "pt":
UpperCAmelCase : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 )
UpperCAmelCase : Any = probs.tolist()
if not isinstance(A , A ):
UpperCAmelCase : Any = [scores]
elif self.framework == "tf":
UpperCAmelCase : List[str] = stable_softmax(A , axis=-1 )
UpperCAmelCase : Union[str, Any] = probs.numpy().tolist()
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
UpperCAmelCase : Any = [
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(A , A ) , key=lambda A : -x[0] )
]
return result
| 265
| 0
|
def __a ( lowerCAmelCase_ : str ,lowerCAmelCase_ : int ) -> str:
'''simple docstring'''
UpperCAmelCase_= [[] for _ in range(lowerCAmelCase_ )]
UpperCAmelCase_= key - 1
if key <= 0:
raise ValueError("""Height of grid can't be 0 or negative""" )
if key == 1 or len(lowerCAmelCase_ ) <= key:
return input_string
for position, character in enumerate(lowerCAmelCase_ ):
UpperCAmelCase_= position % (lowest * 2) # puts it in bounds
UpperCAmelCase_= min(lowerCAmelCase_ ,lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(lowerCAmelCase_ )
UpperCAmelCase_= ["""""".join(lowerCAmelCase_ ) for row in temp_grid]
UpperCAmelCase_= """""".join(lowerCAmelCase_ )
return output_string
def __a ( lowerCAmelCase_ : str ,lowerCAmelCase_ : int ) -> str:
'''simple docstring'''
UpperCAmelCase_= []
UpperCAmelCase_= key - 1
if key <= 0:
raise ValueError("""Height of grid can't be 0 or negative""" )
if key == 1:
return input_string
UpperCAmelCase_= [[] for _ in range(lowerCAmelCase_ )] # generates template
for position in range(len(lowerCAmelCase_ ) ):
UpperCAmelCase_= position % (lowest * 2) # puts it in bounds
UpperCAmelCase_= min(lowerCAmelCase_ ,lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("""*""" )
UpperCAmelCase_= 0
for row in temp_grid: # fills in the characters
UpperCAmelCase_= input_string[counter : counter + len(lowerCAmelCase_ )]
grid.append(list(lowerCAmelCase_ ) )
counter += len(lowerCAmelCase_ )
UpperCAmelCase_= """""" # reads as zigzag
for position in range(len(lowerCAmelCase_ ) ):
UpperCAmelCase_= position % (lowest * 2) # puts it in bounds
UpperCAmelCase_= min(lowerCAmelCase_ ,lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def __a ( lowerCAmelCase_ : str ) -> dict[int, str]:
'''simple docstring'''
UpperCAmelCase_= {}
for key_guess in range(1 ,len(lowerCAmelCase_ ) ): # tries every key
UpperCAmelCase_= decrypt(lowerCAmelCase_ ,lowerCAmelCase_ )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 277
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__A = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase ( snake_case__):
"""simple docstring"""
def __init__( self : Tuple , __UpperCAmelCase : WhisperForConditionalGeneration , __UpperCAmelCase : WhisperProcessor , __UpperCAmelCase : AutoencoderKL , __UpperCAmelCase : CLIPTextModel , __UpperCAmelCase : CLIPTokenizer , __UpperCAmelCase : UNetaDConditionModel , __UpperCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __UpperCAmelCase : StableDiffusionSafetyChecker , __UpperCAmelCase : CLIPImageProcessor , ) -> List[str]:
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=__UpperCAmelCase , speech_processor=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __UpperCAmelCase : Optional[Union[str, int]] = "auto" ) -> List[Any]:
if slice_size == "auto":
UpperCAmelCase_= self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
self.enable_attention_slicing(__UpperCAmelCase )
@torch.no_grad()
def __call__( self : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str=16_000 , __UpperCAmelCase : int = 512 , __UpperCAmelCase : int = 512 , __UpperCAmelCase : int = 50 , __UpperCAmelCase : float = 7.5 , __UpperCAmelCase : Optional[Union[str, List[str]]] = None , __UpperCAmelCase : Optional[int] = 1 , __UpperCAmelCase : float = 0.0 , __UpperCAmelCase : Optional[torch.Generator] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCAmelCase : int = 1 , **__UpperCAmelCase : Union[str, Any] , ) -> Any:
UpperCAmelCase_= self.speech_processor.feature_extractor(
__UpperCAmelCase , return_tensors="""pt""" , sampling_rate=__UpperCAmelCase ).input_features.to(self.device )
UpperCAmelCase_= self.speech_model.generate(__UpperCAmelCase , max_length=480_000 )
UpperCAmelCase_= self.speech_processor.tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , normalize=__UpperCAmelCase )[
0
]
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
UpperCAmelCase_= 1
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
UpperCAmelCase_= len(__UpperCAmelCase )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(__UpperCAmelCase )}.""" )
# get prompt text embeddings
UpperCAmelCase_= self.tokenizer(
__UpperCAmelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
UpperCAmelCase_= text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCAmelCase_= self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
UpperCAmelCase_= text_input_ids[:, : self.tokenizer.model_max_length]
UpperCAmelCase_= self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= text_embeddings.shape
UpperCAmelCase_= text_embeddings.repeat(1 , __UpperCAmelCase , 1 )
UpperCAmelCase_= text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCAmelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCAmelCase_= guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase_= 42
if negative_prompt is None:
UpperCAmelCase_= [""""""] * batch_size
elif type(__UpperCAmelCase ) is not type(__UpperCAmelCase ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCAmelCase )} !="""
F""" {type(__UpperCAmelCase )}.""" )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
UpperCAmelCase_= [negative_prompt]
elif batch_size != len(__UpperCAmelCase ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(__UpperCAmelCase )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
UpperCAmelCase_= negative_prompt
UpperCAmelCase_= text_input_ids.shape[-1]
UpperCAmelCase_= self.tokenizer(
__UpperCAmelCase , padding="""max_length""" , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="""pt""" , )
UpperCAmelCase_= self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase_= uncond_embeddings.shape[1]
UpperCAmelCase_= uncond_embeddings.repeat(1 , __UpperCAmelCase , 1 )
UpperCAmelCase_= uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase_= torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCAmelCase_= (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCAmelCase_= text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCAmelCase_= torch.randn(__UpperCAmelCase , generator=__UpperCAmelCase , device="""cpu""" , dtype=__UpperCAmelCase ).to(
self.device )
else:
UpperCAmelCase_= torch.randn(__UpperCAmelCase , generator=__UpperCAmelCase , device=self.device , dtype=__UpperCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
UpperCAmelCase_= latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCAmelCase_= self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase_= latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase_= """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase_= {}
if accepts_eta:
UpperCAmelCase_= eta
for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_= torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_= self.scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
# predict the noise residual
UpperCAmelCase_= self.unet(__UpperCAmelCase , __UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCAmelCase_, UpperCAmelCase_= noise_pred.chunk(2 )
UpperCAmelCase_= noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_= self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase_= 1 / 0.18_215 * latents
UpperCAmelCase_= self.vae.decode(__UpperCAmelCase ).sample
UpperCAmelCase_= (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCAmelCase_= image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_= self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__UpperCAmelCase , nsfw_content_detected=__UpperCAmelCase )
| 277
| 1
|
"""simple docstring"""
class __A :
def __init__( self , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[Any] = name
_lowerCAmelCase : List[str] = value
_lowerCAmelCase : int = weight
def __repr__( self ):
return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
def __A ( self ):
return self.value
def __A ( self ):
return self.name
def __A ( self ):
return self.weight
def __A ( self ):
return self.value / self.weight
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : List[str] ,_lowerCamelCase : Optional[Any] ) -> str:
_lowerCAmelCase : Optional[Any] = []
for i in range(len(_lowerCamelCase ) ):
menu.append(Things(name[i] ,value[i] ,weight[i] ) )
return menu
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : Dict ,_lowerCamelCase : Any ) -> Union[str, Any]:
_lowerCAmelCase : str = sorted(_lowerCamelCase ,key=_lowerCamelCase ,reverse=_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase , _lowerCAmelCase : Any = 0.0, 0.0
for i in range(len(_lowerCamelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44
|
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = BeautifulSoup(requests.get(lowerCamelCase , params=lowerCamelCase ).content , 'html.parser' )
UpperCAmelCase__ = soup.find('div' , attrs={'class': 'gs_ri'} )
UpperCAmelCase__ = div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' )
return anchors[2].get_text()
if __name__ == "__main__":
lowerCAmelCase__ : Optional[int] = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2_018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 98
| 0
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_lowerCamelCase : Optional[Any] = 8
def __lowerCamelCase ( A__ , A__=BITS ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = x.device
UpperCamelCase = (x * 255).int().clamp(0 , 255 )
UpperCamelCase = 2 ** torch.arange(bits - 1 , -1 , -1 , device=A__ )
UpperCamelCase = rearrange(A__ , 'd -> d 1 1' )
UpperCamelCase = rearrange(A__ , 'b c h w -> b c 1 h w' )
UpperCamelCase = ((x & mask) != 0).float()
UpperCamelCase = rearrange(A__ , 'b c d h w -> b (c d) h w' )
UpperCamelCase = bits * 2 - 1
return bits
def __lowerCamelCase ( A__ , A__=BITS ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = x.device
UpperCamelCase = (x > 0).int()
UpperCamelCase = 2 ** torch.arange(bits - 1 , -1 , -1 , device=A__ , dtype=torch.intaa )
UpperCamelCase = rearrange(A__ , 'd -> d 1 1' )
UpperCamelCase = rearrange(A__ , 'b (c d) h w -> b c d h w' , d=8 )
UpperCamelCase = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' )
return (dec / 255).clamp(0.0 , 1.0 )
def __lowerCamelCase ( self , A__ , A__ , A__ , A__ = 0.0 , A__ = True , A__=None , A__ = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
"""simple docstring"""
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
UpperCamelCase = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
UpperCamelCase = self.alphas_cumprod[timestep]
UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
UpperCamelCase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
UpperCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
UpperCamelCase = self.bit_scale
if self.config.clip_sample:
UpperCamelCase = torch.clamp(A__ , -scale , A__ )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
UpperCamelCase = self._get_variance(A__ , A__ )
UpperCamelCase = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
UpperCamelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
UpperCamelCase = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
UpperCamelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
UpperCamelCase = model_output.device if torch.is_tensor(A__ ) else 'cpu'
UpperCamelCase = torch.randn(model_output.shape , dtype=model_output.dtype , generator=A__ ).to(A__ )
UpperCamelCase = self._get_variance(A__ , A__ ) ** 0.5 * eta * noise
UpperCamelCase = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=A__ , pred_original_sample=A__ )
def __lowerCamelCase ( self , A__ , A__ , A__ , A__="epsilon" , A__=None , A__ = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
"""simple docstring"""
UpperCamelCase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
UpperCamelCase , UpperCamelCase = torch.split(A__ , sample.shape[1] , dim=1 )
else:
UpperCamelCase = None
# 1. compute alphas, betas
UpperCamelCase = self.alphas_cumprod[t]
UpperCamelCase = self.alphas_cumprod[t - 1] if t > 0 else self.one
UpperCamelCase = 1 - alpha_prod_t
UpperCamelCase = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
UpperCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
UpperCamelCase = model_output
else:
raise ValueError(F"""Unsupported prediction_type {prediction_type}.""" )
# 3. Clip "predicted x_0"
UpperCamelCase = self.bit_scale
if self.config.clip_sample:
UpperCamelCase = torch.clamp(A__ , -scale , A__ )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCamelCase = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
UpperCamelCase = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCamelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
UpperCamelCase = 0
if t > 0:
UpperCamelCase = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=A__ ).to(model_output.device )
UpperCamelCase = (self._get_variance(A__ , predicted_variance=A__ ) ** 0.5) * noise
UpperCamelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=A__ , pred_original_sample=A__ )
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self : Any , UpperCamelCase__ : UNetaDConditionModel , UpperCamelCase__ : Union[DDIMScheduler, DDPMScheduler] , UpperCamelCase__ : Optional[float] = 1.0 , ):
"""simple docstring"""
super().__init__()
UpperCamelCase = bit_scale
UpperCamelCase = (
ddim_bit_scheduler_step if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=UpperCamelCase__ , scheduler=UpperCamelCase__ )
@torch.no_grad()
def __call__( self : List[str] , UpperCamelCase__ : Optional[int] = 2_5_6 , UpperCamelCase__ : Optional[int] = 2_5_6 , UpperCamelCase__ : Optional[int] = 5_0 , UpperCamelCase__ : Optional[torch.Generator] = None , UpperCamelCase__ : Optional[int] = 1 , UpperCamelCase__ : Optional[str] = "pil" , UpperCamelCase__ : bool = True , **UpperCamelCase__ : List[Any] , ):
"""simple docstring"""
UpperCamelCase = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=UpperCamelCase__ , )
UpperCamelCase = decimal_to_bits(UpperCamelCase__ ) * self.bit_scale
UpperCamelCase = latents.to(self.device )
self.scheduler.set_timesteps(UpperCamelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
UpperCamelCase = self.unet(UpperCamelCase__ , UpperCamelCase__ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
UpperCamelCase = bits_to_decimal(UpperCamelCase__ )
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(UpperCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase__ )
| 249
|
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_lowerCamelCase : Dict = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
_lowerCamelCase : Any = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
_lowerCamelCase : Dict = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
_lowerCamelCase : List[str] = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
_lowerCamelCase : Any = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def __lowerCamelCase ( A__ , A__ ) -> List[Any]:
"""simple docstring"""
for tf_name, hf_name in patterns:
UpperCamelCase = k.replace(A__ , A__ )
return k
def __lowerCamelCase ( A__ , A__ ) -> BigBirdPegasusForConditionalGeneration:
"""simple docstring"""
UpperCamelCase = BigBirdPegasusConfig(**A__ )
UpperCamelCase = BigBirdPegasusForConditionalGeneration(A__ )
UpperCamelCase = torch_model.state_dict()
UpperCamelCase = {}
# separating decoder weights
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )}
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )}
for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ):
UpperCamelCase = [k.endswith(A__ ) for ending in KEYS_TO_IGNORE]
if any(A__ ):
continue
UpperCamelCase = DECODER_PATTERNS
UpperCamelCase = rename_state_dict_key(A__ , A__ )
if new_k not in state_dict:
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(A__ )
assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ):
UpperCamelCase = [k.endswith(A__ ) for ending in KEYS_TO_IGNORE]
if any(A__ ):
continue
UpperCamelCase = REMAINING_PATTERNS
UpperCamelCase = rename_state_dict_key(A__ , A__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(A__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
UpperCamelCase = mapping['model.embed_positions.weight']
UpperCamelCase = mapping.pop('model.embed_positions.weight' )
UpperCamelCase , UpperCamelCase = torch_model.load_state_dict(A__ , strict=A__ )
UpperCamelCase = [
k
for k in missing
if k
not in [
'final_logits_bias',
'model.encoder.embed_tokens.weight',
'model.decoder.embed_tokens.weight',
'lm_head.weight',
]
]
assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], F"""no matches found for the following tf keys {extra}"""
return torch_model
def __lowerCamelCase ( A__ ) -> Dict:
"""simple docstring"""
UpperCamelCase = tf.train.list_variables(A__ )
UpperCamelCase = {}
UpperCamelCase = ['global_step']
for name, shape in tqdm(A__ , desc='converting tf checkpoint to dict' ):
UpperCamelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCamelCase = tf.train.load_variable(A__ , A__ )
UpperCamelCase = array
return tf_weights
def __lowerCamelCase ( A__ , A__ , A__ ) -> Tuple:
"""simple docstring"""
UpperCamelCase = get_tf_weights_as_numpy(A__ )
UpperCamelCase = convert_bigbird_pegasus(A__ , A__ )
torch_model.save_pretrained(A__ )
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
_lowerCamelCase : Tuple = parser.parse_args()
_lowerCamelCase : Tuple = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 249
| 1
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase ( A_ ):
A__ : Dict = (DDIMParallelScheduler,)
A__ : Tuple = (("eta", 0.0), ("num_inference_steps", 50))
def _SCREAMING_SNAKE_CASE (self : Tuple , **snake_case__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
snake_case : Any = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**snake_case__ )
return config
def _SCREAMING_SNAKE_CASE (self : Dict , **snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
snake_case : List[Any] = self.scheduler_classes[0]
snake_case : Any = self.get_scheduler_config(**snake_case__ )
snake_case : Any = scheduler_class(**snake_case__ )
snake_case , snake_case : Union[str, Any] = 10, 0.0
snake_case : List[Any] = self.dummy_model()
snake_case : Any = self.dummy_sample_deter
scheduler.set_timesteps(snake_case__ )
for t in scheduler.timesteps:
snake_case : Optional[int] = model(snake_case__ , snake_case__ )
snake_case : List[str] = scheduler.step(snake_case__ , snake_case__ , snake_case__ , snake_case__ ).prev_sample
return sample
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> str:
'''simple docstring'''
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : str ) -> int:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=snake_case__ )
snake_case : Optional[int] = self.scheduler_classes[0]
snake_case : Optional[int] = self.get_scheduler_config(steps_offset=1 )
snake_case : Union[str, Any] = scheduler_class(**snake_case__ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )
def _SCREAMING_SNAKE_CASE (self : int ) -> Tuple:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : str ) -> Dict:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> List[str]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> List[Any]:
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
self.check_over_configs(thresholding=snake_case__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=snake_case__ , prediction_type=snake_case__ , sample_max_value=snake_case__ , )
def _SCREAMING_SNAKE_CASE (self : Any ) -> Any:
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Any:
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ):
self.check_over_forward(time_step=snake_case__ , num_inference_steps=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[Any]:
'''simple docstring'''
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=snake_case__ , eta=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Dict = self.scheduler_classes[0]
snake_case : Tuple = self.get_scheduler_config()
snake_case : Dict = scheduler_class(**snake_case__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.14771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.32460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1e-5
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Dict:
'''simple docstring'''
snake_case : Union[str, Any] = self.scheduler_classes[0]
snake_case : List[Any] = self.get_scheduler_config()
snake_case : int = scheduler_class(**snake_case__ )
snake_case , snake_case : Any = 10, 0.0
scheduler.set_timesteps(snake_case__ )
snake_case : Optional[Any] = self.dummy_model()
snake_case : str = self.dummy_sample_deter
snake_case : Dict = self.dummy_sample_deter + 0.1
snake_case : Dict = self.dummy_sample_deter - 0.1
snake_case : Optional[Any] = samplea.shape[0]
snake_case : str = torch.stack([samplea, samplea, samplea] , dim=0 )
snake_case : Tuple = torch.arange(snake_case__ )[0:3, None].repeat(1 , snake_case__ )
snake_case : Tuple = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
snake_case : List[str] = scheduler.batch_step_no_noise(snake_case__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , snake_case__ )
snake_case : Dict = torch.sum(torch.abs(snake_case__ ) )
snake_case : List[Any] = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 1147.7904 ) < 1e-2
assert abs(result_mean.item() - 0.4982 ) < 1e-3
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[Any] = self.full_loop()
snake_case : Optional[Any] = torch.sum(torch.abs(snake_case__ ) )
snake_case : List[Any] = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 172.0067 ) < 1e-2
assert abs(result_mean.item() - 0.223967 ) < 1e-3
def _SCREAMING_SNAKE_CASE (self : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = self.full_loop(prediction_type="v_prediction" )
snake_case : int = torch.sum(torch.abs(snake_case__ ) )
snake_case : Optional[int] = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 52.5302 ) < 1e-2
assert abs(result_mean.item() - 0.0684 ) < 1e-3
def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[Any]:
'''simple docstring'''
snake_case : Dict = self.full_loop(set_alpha_to_one=snake_case__ , beta_start=0.01 )
snake_case : str = torch.sum(torch.abs(snake_case__ ) )
snake_case : Optional[Any] = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 149.8295 ) < 1e-2
assert abs(result_mean.item() - 0.1951 ) < 1e-3
def _SCREAMING_SNAKE_CASE (self : int ) -> Optional[Any]:
'''simple docstring'''
snake_case : int = self.full_loop(set_alpha_to_one=snake_case__ , beta_start=0.01 )
snake_case : Tuple = torch.sum(torch.abs(snake_case__ ) )
snake_case : List[Any] = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 149.0784 ) < 1e-2
assert abs(result_mean.item() - 0.1941 ) < 1e-3
| 59
|
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : int ):
snake_case : list[list[str]] = [[] for _ in range(__lowerCamelCase )]
snake_case : int = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1 or len(__lowerCamelCase ) <= key:
return input_string
for position, character in enumerate(__lowerCamelCase ):
snake_case : Any = position % (lowest * 2) # puts it in bounds
snake_case : Optional[int] = min(__lowerCamelCase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(__lowerCamelCase )
snake_case : List[str] = ["".join(__lowerCamelCase ) for row in temp_grid]
snake_case : Tuple = "".join(__lowerCamelCase )
return output_string
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : int ):
snake_case : Dict = []
snake_case : Union[str, Any] = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1:
return input_string
snake_case : list[list[str]] = [[] for _ in range(__lowerCamelCase )] # generates template
for position in range(len(__lowerCamelCase ) ):
snake_case : List[str] = position % (lowest * 2) # puts it in bounds
snake_case : Optional[int] = min(__lowerCamelCase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("*" )
snake_case : Tuple = 0
for row in temp_grid: # fills in the characters
snake_case : Union[str, Any] = input_string[counter : counter + len(__lowerCamelCase )]
grid.append(list(__lowerCamelCase ) )
counter += len(__lowerCamelCase )
snake_case : str = "" # reads as zigzag
for position in range(len(__lowerCamelCase ) ):
snake_case : Optional[int] = position % (lowest * 2) # puts it in bounds
snake_case : Tuple = min(__lowerCamelCase , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def UpperCamelCase ( __lowerCamelCase : str ):
snake_case : Tuple = {}
for key_guess in range(1 , len(__lowerCamelCase ) ): # tries every key
snake_case : Any = decrypt(__lowerCamelCase , __lowerCamelCase )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 59
| 1
|
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def _snake_case ( _snake_case : Dict ) -> Tuple:
'''simple docstring'''
return {key.lstrip('-' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def _snake_case ( ) -> Optional[Any]:
'''simple docstring'''
_A = ArgumentParser(
'HuggingFace Datasets CLI tool' , usage='datasets-cli <command> [<args>]' , allow_abbrev=_snake_case )
_A = parser.add_subparsers(help='datasets-cli command helpers' )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(_snake_case )
EnvironmentCommand.register_subcommand(_snake_case )
TestCommand.register_subcommand(_snake_case )
RunBeamCommand.register_subcommand(_snake_case )
DummyDataCommand.register_subcommand(_snake_case )
# Parse args
_A , _A = parser.parse_known_args()
if not hasattr(_snake_case , 'func' ):
parser.print_help()
exit(1 )
_A = parse_unknown_args(_snake_case )
# Run
_A = args.func(_snake_case , **_snake_case )
service.run()
if __name__ == "__main__":
main()
| 271
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
a = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def _snake_case ( _snake_case : str , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : Tuple=None ) -> List[str]:
'''simple docstring'''
_A = XLNetConfig.from_json_file(_snake_case )
_A = finetuning_task.lower() if finetuning_task is not None else ''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
_A = finetuning_task
_A = GLUE_TASKS_NUM_LABELS[finetuning_task]
_A = XLNetForSequenceClassification(_snake_case )
elif "squad" in finetuning_task:
_A = finetuning_task
_A = XLNetForQuestionAnswering(_snake_case )
else:
_A = XLNetLMHeadModel(_snake_case )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_snake_case , _snake_case , _snake_case )
# Save pytorch-model
_A = os.path.join(_snake_case , _snake_case )
_A = os.path.join(_snake_case , _snake_case )
print(F'''Save PyTorch model to {os.path.abspath(_snake_case )}''' )
torch.save(model.state_dict() , _snake_case )
print(F'''Save configuration file to {os.path.abspath(_snake_case )}''' )
with open(_snake_case , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
a = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 271
| 1
|
def __UpperCAmelCase ( __a : int ) -> "list[int]":
"""simple docstring"""
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
_a : Optional[Any] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
_a : List[str] = 1
if upper_limit > 0:
_a : Dict = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 ,upper_limit + 1 ):
for j in range(__a ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
a__ = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
| 235
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a__ = logging.get_logger(__name__)
a__ = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class UpperCAmelCase_ ( __lowercase , __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Any = "bit"
UpperCAmelCase__ : Optional[int] = ["preactivation", "bottleneck"]
UpperCAmelCase__ : Optional[Any] = ["SAME", "VALID"]
def __init__( self , _a=3 , _a=6_4 , _a=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , _a=[3, 4, 6, 3] , _a="preactivation" , _a="relu" , _a=None , _a=3_2 , _a=0.0 , _a=False , _a=3_2 , _a=1 , _a=None , _a=None , **_a , ) -> Union[str, Any]:
super().__init__(**_a )
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
_a : Any = global_padding.upper()
else:
raise ValueError(F"""Padding strategy {global_padding} not supported""" )
_a : Optional[int] = num_channels
_a : List[Any] = embedding_size
_a : Any = hidden_sizes
_a : int = depths
_a : Dict = layer_type
_a : int = hidden_act
_a : Optional[Any] = global_padding
_a : Optional[Any] = num_groups
_a : Union[str, Any] = drop_path_rate
_a : Tuple = embedding_dynamic_padding
_a : Union[str, Any] = output_stride
_a : Any = width_factor
_a : Any = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(_a ) + 1 )]
_a , _a : List[str] = get_aligned_output_features_output_indices(
out_features=_a , out_indices=_a , stage_names=self.stage_names )
| 235
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Optional[Any] = logging.get_logger(__name__)
A_ : Any = {
"google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = 'pegasus'
lowerCamelCase__ : Optional[Any] = ['past_key_values']
lowerCamelCase__ : List[str] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__(self, lowerCamelCase_=5_0_2_6_5, lowerCamelCase_=1_0_2_4, lowerCamelCase_=1_2, lowerCamelCase_=4_0_9_6, lowerCamelCase_=1_6, lowerCamelCase_=1_2, lowerCamelCase_=4_0_9_6, lowerCamelCase_=1_6, lowerCamelCase_=0.0, lowerCamelCase_=0.0, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_="gelu", lowerCamelCase_=1_0_2_4, lowerCamelCase_=0.1, lowerCamelCase_=0.0, lowerCamelCase_=0.0, lowerCamelCase_=0.02, lowerCamelCase_=0, lowerCamelCase_=False, lowerCamelCase_=0, lowerCamelCase_=1, lowerCamelCase_=1, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Any = vocab_size
lowerCamelCase__ : str = max_position_embeddings
lowerCamelCase__ : Optional[int] = d_model
lowerCamelCase__ : Union[str, Any] = encoder_ffn_dim
lowerCamelCase__ : List[Any] = encoder_layers
lowerCamelCase__ : Union[str, Any] = encoder_attention_heads
lowerCamelCase__ : Union[str, Any] = decoder_ffn_dim
lowerCamelCase__ : str = decoder_layers
lowerCamelCase__ : List[str] = decoder_attention_heads
lowerCamelCase__ : Dict = dropout
lowerCamelCase__ : Any = attention_dropout
lowerCamelCase__ : Tuple = activation_dropout
lowerCamelCase__ : Any = activation_function
lowerCamelCase__ : int = init_std
lowerCamelCase__ : List[str] = encoder_layerdrop
lowerCamelCase__ : Union[str, Any] = decoder_layerdrop
lowerCamelCase__ : Tuple = use_cache
lowerCamelCase__ : Any = encoder_layers
lowerCamelCase__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase_, eos_token_id=lowerCamelCase_, is_encoder_decoder=lowerCamelCase_, decoder_start_token_id=lowerCamelCase_, forced_eos_token_id=lowerCamelCase_, **lowerCamelCase_, )
@property
def a__ (self ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def a__ (self ):
'''simple docstring'''
return self.d_model
| 358
|
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
A_ : Union[str, Any] = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
A_ : int = concatenate_datasets
A_ : Any = DownloadConfig
A_ : List[Any] = DownloadManager
A_ : Optional[Any] = DownloadMode
A_ : List[str] = DownloadConfig
A_ : Optional[int] = DownloadMode
A_ : Dict = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 316
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.