| code (string, 82-53.2k chars) | code_codestyle (int64, 0-721) | style_context (string, 91-41.9k chars) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseCLICommand(ABC):
    """Abstract base class for CLI subcommands."""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
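A minimal sketch of how a concrete subcommand plugs into this base class; the `EchoCommand` name and behaviour are illustrative assumptions, not part of the original module:

class EchoCommand(BaseCLICommand):
    @staticmethod
    def register_subcommand(subparsers):
        # `subparsers` is the object returned by ArgumentParser.add_subparsers()
        echo_parser = subparsers.add_parser("echo")
        echo_parser.add_argument("message", type=str)
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.message))

    def __init__(self, message: str):
        self._message = message

    def run(self):
        print(self._message)

parser = ArgumentParser("cli")
EchoCommand.register_subcommand(parser.add_subparsers())
args = parser.parse_args(["echo", "hello"])
args.func(args).run()  # prints "hello"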
| code_codestyle: 606 |
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized_class

from . import is_sagemaker_available

if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because it should only be run when releasing a minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
        },
    ]
)
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # creates the HuggingFace SageMaker estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from the SageMaker job; this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump test results into a json file to share in the PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| style_context_codestyle: 606 | label: 1 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| code_codestyle: 708 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class DownloadConfig:
    # The original row anonymized every field name; the names below are assumed
    # from the matching upstream `datasets.DownloadConfig` definition.
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
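A short usage sketch of the copy semantics, assuming the restored field names above:

config = DownloadConfig(max_retries=3, proxies={"https": "http://127.0.0.1:3128"})
clone = config.copy()
clone.proxies["https"] = "http://10.0.0.1:3128"
assert config.proxies["https"] == "http://127.0.0.1:3128"  # deep copy, original untouched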
| style_context_codestyle: 669 | label: 0 |
import argparse

import torch

from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from the TensorFlow checkpoint
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save the PyTorch model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--openai_checkpoint_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the TensorFlow checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--openai_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_openai_checkpoint_to_pytorch(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
| code_codestyle: 342 |
import enum
import shutil
import sys

TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines, direction):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
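A small demonstration of the helpers above on an ANSI-capable terminal (31/32 are the standard ANSI red/green codes):

forceWrite("working...")
reset_cursor()          # carriage return: the next write overwrites the line
clear_line()
writeColor("done", 32)  # green
forceWrite("\n")
linebreak()             # draws a full-width horizontal rule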
| style_context_codestyle: 342 | label: 1 |
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make
        # its length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: bytes) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we can convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
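A round-trip sanity check of the two functions above against the standard library:

import base64

payload = b"Hello, World!"
encoded = base64_encode(payload)
assert encoded == base64.b64encode(payload)  # b'SGVsbG8sIFdvcmxkIQ=='
assert base64_decode(encoded) == payload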
| code_codestyle: 704 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
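A quick instantiation sketch, assuming the config class is importable from an installed transformers package:

from transformers import MegatronBertConfig

config = MegatronBertConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=4)
print(config.model_type)  # "megatron-bert"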
| style_context_codestyle: 4 | label: 0 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| code_codestyle: 133 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock

import torch

from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config


# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]


class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)


@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clean_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| style_context_codestyle: 133 | label: 1 |
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def UpperCamelCase ( _A ) -> List[str]:
lowercase : Union[str, Any] = {}
lowercase : int = job["""started_at"""]
lowercase : List[str] = job["""completed_at"""]
lowercase : List[str] = date_parser.parse(_A )
lowercase : Dict = date_parser.parse(_A )
lowercase : Dict = round((end_datetime - start_datetime).total_seconds() / 60.0 )
lowercase : List[str] = start
lowercase : Optional[int] = end
lowercase : str = duration_in_min
return job_info
def UpperCamelCase ( _A , _A=None ) -> Optional[Any]:
lowercase : Optional[Any] = None
if token is not None:
lowercase : int = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
lowercase : Union[str, Any] = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
lowercase : Any = requests.get(_A , headers=_A ).json()
lowercase : List[Any] = {}
try:
job_time.update({job["""name"""]: extract_time_from_single_job(_A ) for job in result["""jobs"""]} )
lowercase : int = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(_A ):
lowercase : Tuple = requests.get(url + F"""&page={i + 2}""" , headers=_A ).json()
job_time.update({job["""name"""]: extract_time_from_single_job(_A ) for job in result["""jobs"""]} )
return job_time
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
_lowerCAmelCase = parser.parse_args()
_lowerCAmelCase = get_job_time(args.workflow_run_id)
_lowerCAmelCase = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'{k}: {v["duration"]}')
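An offline sketch of the per-job helper with a fabricated job record, so no GitHub API call is needed:

job = {"started_at": "2023-05-01T10:00:00Z", "completed_at": "2023-05-01T10:42:30Z"}
info = extract_time_from_single_job(job)
print(info["duration"])  # 42 (minutes, rounded)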
| code_codestyle: 711 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class UpperCamelCase (unittest.TestCase ):
def __snake_case ( self :Optional[Any] ) ->str:
lowercase : Dict = get_activation("""swish""" )
self.assertIsInstance(__magic_name__ , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def __snake_case ( self :Union[str, Any] ) ->Any:
lowercase : Any = get_activation("""silu""" )
self.assertIsInstance(__magic_name__ , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def __snake_case ( self :str ) ->str:
lowercase : Tuple = get_activation("""mish""" )
self.assertIsInstance(__magic_name__ , nn.Mish )
self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def __snake_case ( self :List[str] ) ->Union[str, Any]:
lowercase : Optional[Any] = get_activation("""gelu""" )
self.assertIsInstance(__magic_name__ , nn.GELU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
| style_context_codestyle: 348 | label: 0 |
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class snake_case ( nn.Module ):
def __init__( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int]=0.0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str = "geglu" , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : str = "layer_norm" , UpperCamelCase__ : bool = False , )-> int:
'''simple docstring'''
super().__init__()
__lowerCAmelCase: List[Any] = only_cross_attention
__lowerCAmelCase: str = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
__lowerCAmelCase: int = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.")
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
__lowerCAmelCase: int = AdaLayerNorm(__lowercase , __lowercase)
elif self.use_ada_layer_norm_zero:
__lowerCAmelCase: Dict = AdaLayerNormZero(__lowercase , __lowercase)
else:
__lowerCAmelCase: List[Any] = nn.LayerNorm(__lowercase , elementwise_affine=__lowercase)
__lowerCAmelCase: Optional[Any] = Attention(
query_dim=__lowercase , heads=__lowercase , dim_head=__lowercase , dropout=__lowercase , bias=__lowercase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__lowercase , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
__lowerCAmelCase: Optional[Any] = (
AdaLayerNorm(__lowercase , __lowercase)
if self.use_ada_layer_norm
else nn.LayerNorm(__lowercase , elementwise_affine=__lowercase)
)
__lowerCAmelCase: Optional[Any] = Attention(
query_dim=__lowercase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__lowercase , dim_head=__lowercase , dropout=__lowercase , bias=__lowercase , upcast_attention=__lowercase , ) # is self-attn if encoder_hidden_states is none
else:
__lowerCAmelCase: Optional[Any] = None
__lowerCAmelCase: Optional[Any] = None
# 3. Feed-forward
__lowerCAmelCase: Any = nn.LayerNorm(__lowercase , elementwise_affine=__lowercase)
__lowerCAmelCase: Optional[Any] = FeedForward(__lowercase , dropout=__lowercase , activation_fn=__lowercase , final_dropout=__lowercase)
# let chunk size default to None
__lowerCAmelCase: str = None
__lowerCAmelCase: int = 0
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int)-> int:
'''simple docstring'''
__lowerCAmelCase: int = chunk_size
__lowerCAmelCase: Dict = dim
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , UpperCamelCase__ : Dict[str, Any] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , )-> int:
'''simple docstring'''
if self.use_ada_layer_norm:
__lowerCAmelCase: Tuple = self.norma(__lowercase , __lowercase)
elif self.use_ada_layer_norm_zero:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: int = self.norma(
__lowercase , __lowercase , __lowercase , hidden_dtype=hidden_states.dtype)
else:
__lowerCAmelCase: List[Any] = self.norma(__lowercase)
__lowerCAmelCase: Any = cross_attention_kwargs if cross_attention_kwargs is not None else {}
__lowerCAmelCase: List[Any] = self.attna(
__lowercase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__lowercase , **__lowercase , )
if self.use_ada_layer_norm_zero:
__lowerCAmelCase: Dict = gate_msa.unsqueeze(1) * attn_output
__lowerCAmelCase: List[str] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
__lowerCAmelCase: Optional[Any] = (
self.norma(__lowercase , __lowercase) if self.use_ada_layer_norm else self.norma(__lowercase)
)
__lowerCAmelCase: List[Any] = self.attna(
__lowercase , encoder_hidden_states=__lowercase , attention_mask=__lowercase , **__lowercase , )
__lowerCAmelCase: Tuple = attn_output + hidden_states
# 3. Feed-forward
__lowerCAmelCase: str = self.norma(__lowercase)
if self.use_ada_layer_norm_zero:
__lowerCAmelCase: Any = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.")
__lowerCAmelCase: Any = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
__lowerCAmelCase: Any = torch.cat(
[self.ff(__lowercase) for hid_slice in norm_hidden_states.chunk(__lowercase , dim=self._chunk_dim)] , dim=self._chunk_dim , )
else:
__lowerCAmelCase: Dict = self.ff(__lowercase)
if self.use_ada_layer_norm_zero:
__lowerCAmelCase: Union[str, Any] = gate_mlp.unsqueeze(1) * ff_output
__lowerCAmelCase: List[Any] = ff_output + hidden_states
return hidden_states
class snake_case ( nn.Module ):
def __init__( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 4 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : str = "geglu" , UpperCamelCase__ : bool = False , )-> Optional[Any]:
'''simple docstring'''
super().__init__()
__lowerCAmelCase: Any = int(dim * mult)
__lowerCAmelCase: str = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
__lowerCAmelCase: List[Any] = GELU(__lowercase , __lowercase)
if activation_fn == "gelu-approximate":
__lowerCAmelCase: Any = GELU(__lowercase , __lowercase , approximate="tanh")
elif activation_fn == "geglu":
__lowerCAmelCase: Union[str, Any] = GEGLU(__lowercase , __lowercase)
elif activation_fn == "geglu-approximate":
__lowerCAmelCase: List[Any] = ApproximateGELU(__lowercase , __lowercase)
__lowerCAmelCase: List[str] = nn.ModuleList([])
# project in
self.net.append(__lowercase)
# project dropout
self.net.append(nn.Dropout(__lowercase))
# project out
self.net.append(nn.Linear(__lowercase , __lowercase))
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(__lowercase))
def lowercase_ ( self : str , UpperCamelCase__ : Optional[int])-> Dict:
'''simple docstring'''
for module in self.net:
__lowerCAmelCase: Any = module(__lowercase)
return hidden_states
class snake_case ( nn.Module ):
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : str = "none")-> Any:
'''simple docstring'''
super().__init__()
__lowerCAmelCase: Dict = nn.Linear(__lowercase , __lowercase)
__lowerCAmelCase: Tuple = approximate
def lowercase_ ( self : Dict , UpperCamelCase__ : List[Any])-> int:
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(__lowercase , approximate=self.approximate)
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa) , approximate=self.approximate).to(dtype=gate.dtype)
def lowercase_ ( self : Any , UpperCamelCase__ : Tuple)-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: Any = self.proj(__lowercase)
__lowerCAmelCase: Union[str, Any] = self.gelu(__lowercase)
return hidden_states
class snake_case ( nn.Module ):
def __init__( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int)-> Tuple:
'''simple docstring'''
super().__init__()
__lowerCAmelCase: List[str] = nn.Linear(__lowercase , dim_out * 2)
def lowercase_ ( self : Tuple , UpperCamelCase__ : List[str])-> Optional[int]:
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(__lowercase)
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa)).to(dtype=gate.dtype)
def lowercase_ ( self : str , UpperCamelCase__ : int)-> Tuple:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = self.proj(__lowercase).chunk(2 , dim=-1)
return hidden_states * self.gelu(__lowercase)
class snake_case ( nn.Module ):
def __init__( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int)-> Any:
'''simple docstring'''
super().__init__()
__lowerCAmelCase: Optional[int] = nn.Linear(__lowercase , __lowercase)
def lowercase_ ( self : List[Any] , UpperCamelCase__ : List[str])-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = self.proj(__lowercase)
return x * torch.sigmoid(1.702 * x)
class snake_case ( nn.Module ):
def __init__( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any)-> Any:
'''simple docstring'''
super().__init__()
__lowerCAmelCase: Optional[int] = nn.Embedding(__lowercase , __lowercase)
__lowerCAmelCase: Optional[int] = nn.SiLU()
__lowerCAmelCase: Optional[int] = nn.Linear(__lowercase , embedding_dim * 2)
__lowerCAmelCase: Dict = nn.LayerNorm(__lowercase , elementwise_affine=__lowercase)
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int])-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Tuple = self.linear(self.silu(self.emb(__lowercase)))
__lowerCAmelCase , __lowerCAmelCase: Tuple = torch.chunk(__lowercase , 2)
__lowerCAmelCase: Dict = self.norm(__lowercase) * (1 + scale) + shift
return x
class snake_case ( nn.Module ):
def __init__( self : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str)-> Dict:
'''simple docstring'''
super().__init__()
__lowerCAmelCase: Union[str, Any] = CombinedTimestepLabelEmbeddings(__lowercase , __lowercase)
__lowerCAmelCase: Dict = nn.SiLU()
__lowerCAmelCase: Union[str, Any] = nn.Linear(__lowercase , 6 * embedding_dim , bias=__lowercase)
__lowerCAmelCase: Dict = nn.LayerNorm(__lowercase , elementwise_affine=__lowercase , eps=1e-6)
def lowercase_ ( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : int=None)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = self.linear(self.silu(self.emb(__lowercase , __lowercase , hidden_dtype=__lowercase)))
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: Optional[int] = emb.chunk(6 , dim=1)
__lowerCAmelCase: List[Any] = self.norm(__lowercase) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class snake_case ( nn.Module ):
def __init__( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : float = 1e-5)-> List[Any]:
'''simple docstring'''
super().__init__()
__lowerCAmelCase: Optional[int] = num_groups
__lowerCAmelCase: Optional[Any] = eps
if act_fn is None:
__lowerCAmelCase: List[str] = None
else:
__lowerCAmelCase: int = get_activation(__lowercase)
__lowerCAmelCase: Dict = nn.Linear(__lowercase , out_dim * 2)
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict)-> Any:
'''simple docstring'''
if self.act:
__lowerCAmelCase: Optional[int] = self.act(__lowercase)
__lowerCAmelCase: Optional[int] = self.linear(__lowercase)
__lowerCAmelCase: Tuple = emb[:, :, None, None]
__lowerCAmelCase , __lowerCAmelCase: Any = emb.chunk(2 , dim=1)
__lowerCAmelCase: Optional[Any] = F.group_norm(__lowercase , self.num_groups , eps=self.eps)
__lowerCAmelCase: Dict = x * (1 + scale) + shift
return x
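A quick forward-pass smoke test of the transformer block above; a minimal sketch assuming a diffusers version that exposes the class at this import path:

import torch
from diffusers.models.attention import BasicTransformerBlock

block = BasicTransformerBlock(dim=64, num_attention_heads=4, attention_head_dim=16)
hidden_states = torch.randn(2, 77, 64)  # (batch, sequence, channels)
out = block(hidden_states)
print(out.shape)  # torch.Size([2, 77, 64])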
| code_codestyle: 346 |
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")

    return name


def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| style_context_codestyle: 119 | label: 0 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : int = ArgumentParser(
description=(
"PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores" , type=SCREAMING_SNAKE_CASE , default=1 , help="Number of TPU cores to use (1 or 8)." )
# positional
parser.add_argument(
"training_script" , type=SCREAMING_SNAKE_CASE , help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
) , )
# rest from the training program
parser.add_argument("training_script_args" , nargs=SCREAMING_SNAKE_CASE )
return parser.parse_args()
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Any = parse_args()
# Import training_script as a module.
lowerCAmelCase : int = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowerCAmelCase : Any = script_fpath.stem
lowerCAmelCase : Union[str, Any] = importlib.import_module(SCREAMING_SNAKE_CASE )
# Patch sys.argv
lowerCAmelCase : Tuple = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
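Typical use is from the command line; a sketch, assuming a `train.py` that defines the `_mp_fn(index)` entry point expected by `xmp.spawn`:

# python xla_spawn.py --num_cores 8 train.py --learning_rate 3e-5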
| code_codestyle: 703 |
"""simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Tuple = OmegaConf.load(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE , map_location="cpu" )["model"]
lowerCAmelCase : int = list(state_dict.keys() )
# extract state_dict for VQVAE
lowerCAmelCase : Tuple = {}
lowerCAmelCase : Dict = "first_stage_model."
for key in keys:
if key.startswith(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : List[str] = state_dict[key]
# extract state_dict for UNetLDM
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Tuple = "model.diffusion_model."
for key in keys:
if key.startswith(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : str = state_dict[key]
lowerCAmelCase : List[str] = config.model.params.first_stage_config.params
lowerCAmelCase : List[Any] = config.model.params.unet_config.params
lowerCAmelCase : Union[str, Any] = VQModel(**SCREAMING_SNAKE_CASE ).eval()
vqvae.load_state_dict(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = UNetLDMModel(**SCREAMING_SNAKE_CASE ).eval()
unet.load_state_dict(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[Any] = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=SCREAMING_SNAKE_CASE , )
lowerCAmelCase : Tuple = LDMPipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
pipeline.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
lowerCAmelCase__ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| style_context_codestyle: 681 | label: 0 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : List[str] = ['''image_processor''', '''tokenizer''']
UpperCamelCase : Union[str, Any] = '''OwlViTImageProcessor'''
UpperCamelCase : Dict = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : Any , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=None , **UpperCAmelCase__ : Tuple ) -> Union[str, Any]:
_a : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , UpperCAmelCase__ , )
_a : List[Any] = kwargs.pop("""feature_extractor""" )
_a : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(UpperCAmelCase__ , UpperCAmelCase__ )
def __call__( self : Dict , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Optional[Any]="max_length" , UpperCAmelCase__ : int="np" , **UpperCAmelCase__ : int ) -> int:
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or (isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and not isinstance(text[0] , UpperCAmelCase__ )):
_a : Optional[Any] = [self.tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ )]
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(text[0] , UpperCAmelCase__ ):
_a : Optional[int] = []
# Maximum number of queries across batch
_a : str = max([len(UpperCAmelCase__ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(UpperCAmelCase__ ) != max_num_queries:
_a : Any = t + [""" """] * (max_num_queries - len(UpperCAmelCase__ ))
_a : Any = self.tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ )
encodings.append(UpperCAmelCase__ )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
_a : List[Any] = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
_a : Optional[Any] = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_a : str = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
_a : str = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
_a : List[str] = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
_a : Optional[int] = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_a : Optional[Any] = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
_a : List[Any] = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
_a : Any = BatchEncoding()
_a : Any = input_ids
_a : Dict = attention_mask
if query_images is not None:
_a : List[Any] = BatchEncoding()
_a : Dict = self.image_processor(
UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ ).pixel_values
_a : List[str] = query_pixel_values
if images is not None:
_a : str = self.image_processor(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ )
if text is not None and images is not None:
_a : Any = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_a : Any = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase__ ) , tensor_type=UpperCAmelCase__ )
def _lowercase ( self : Any , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Union[str, Any] ) -> Tuple:
return self.image_processor.post_process(*UpperCAmelCase__ , **UpperCAmelCase__ )
def _lowercase ( self : Tuple , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : Dict ) -> Any:
return self.image_processor.post_process_object_detection(*UpperCAmelCase__ , **UpperCAmelCase__ )
def _lowercase ( self : List[Any] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : List[Any] ) -> str:
return self.image_processor.post_process_image_guided_detection(*UpperCAmelCase__ , **UpperCAmelCase__ )
def _lowercase ( self : Tuple , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : int ) -> Optional[int]:
return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
def _lowercase ( self : Any , *UpperCAmelCase__ : int , **UpperCAmelCase__ : List[str] ) -> Optional[Any]:
return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
@property
def _lowercase ( self : Dict ) -> Dict:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , UpperCAmelCase__ , )
return self.image_processor_class
@property
def _lowercase ( self : Optional[Any] ) -> Dict:
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , UpperCAmelCase__ , )
return self.image_processor
| code_codestyle: 389 |
"""simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        # start from a white canvas of the destination size
        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255

    def process(self):
        # copy, for every destination pixel, its nearest source pixel
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        # map a destination column to the nearest source column
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        # map a destination row to the nearest source row
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
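# Vectorized alternative (a sketch, not in the original file): the same
# nearest-neighbour mapping can be done without Python loops by indexing the
# source image with precomputed row/column index arrays.
#
#   ys = (np.arange(dst_h) * (im.shape[0] / dst_h)).astype(int)
#   xs = (np.arange(dst_w) * (im.shape[1] / dst_w)).astype(int)
#   resized = im[ys[:, None], xs[None, :]]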
| 389
| 1
|
def solution() -> int:
    """Returns the number of Sundays that fell on the first of the month
    during the twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
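# Cross-check sketch (not in the original file) using the standard library:
#
#   from datetime import date
#   assert solution() == sum(
#       date(y, m, 1).weekday() == 6  # Sunday
#       for y in range(1901, 2001)
#       for m in range(1, 13)
#   )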
| 719
|
'''simple docstring'''
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """
    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    """
    arr_len = len(arr)
    # subset[i][j] is True when some subset of the first i elements sums to j
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
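# Space-optimized sketch (not in the original file): the same recurrence needs
# only a single row if j is traversed right-to-left.
#
#   def is_sum_subset_1d(arr: list[int], required_sum: int) -> bool:
#       dp = [False] * (required_sum + 1)
#       dp[0] = True
#       for value in arr:
#           for j in range(required_sum, value - 1, -1):
#               dp[j] = dp[j] or dp[j - value]
#       return dp[required_sum]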
| 461
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 291
|
'''simple docstring'''
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if sources is int:
            sources = [sources]
        if sinks is int:
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # you should override this method in a concrete algorithm
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()

    print(f"maximum flow is {maximum_flow}")
| 523
| 0
|
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Returns the prime numbers below max_number (sieve of Eratosthenes)."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Returns the number of composite integers below max_number that have
    precisely two (not necessarily distinct) prime factors."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
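# Worked example (a sketch, not in the original file): solution(30) == 10,
# matching the ten semiprimes below 30 counted by hand:
# 4, 6, 9, 10, 14, 15, 21, 22, 25, 26.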
| 3
|
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeats the key until it matches the length of the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypts the message with the generated key; spaces pass through."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypts the cipher text back to the original message."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
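# Round-trip sketch (not in the original file): decryption inverts encryption
# for any message/key pair, e.g.
#
#   k = generate_key("HELLO", "KEY")  # -> "KEYKE"
#   assert original_text(cipher_text("HELLO", k), k) == "HELLO"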
| 3
| 1
|
"""simple docstring"""
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
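# Usage sketch (not in the original file): with the sample graphs above,
# bidirectional_dij("E", "F", graph_fwd, graph_bwd) returns 3
# (E -> G -> F, cost 2 + 1), beating E -> B -> C -> D -> F at cost 4.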
| 480
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 480
| 1
|
__all__ = [
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 484
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict


@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 484
| 1
|
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '''\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'''
_DESCRIPTION = '''\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'''
_KWARGS_DESCRIPTION = '''\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 422
|
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 249
| 0
|
from __future__ import annotations
seive = [True] * 100_0001
i = 2
while i * i <= 100_0000:
    if seive[i]:
        for j in range(i * i, 100_0001, i):
            seive[j] = False
    i += 1


def is_prime(n: int) -> bool:
    """For 0 <= n <= 1000000, returns True if n is prime (sieve lookup)."""
    return seive[n]


def contains_an_even_digit(n: int) -> bool:
    """Returns True if n contains an even digit."""
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    """Returns all circular primes below the limit."""
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    """Returns the number of circular primes below one million."""
    return len(find_circular_primes())


if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
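# Sanity check (a sketch, not in the original file): below 100 the circular
# primes are 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, 97, and the full run
# below one million should count 55 of them (Project Euler problem 35).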
| 712
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 152
| 0
|
"""simple docstring"""
UNIT_SYMBOL = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
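# Usage sketch (not in the original file):
#
#   length_conversion(4, "kilometer", "megametre")  # -> 0.004  (4 * 10**(3 - 6))
#   length_conversion(1, "meter", "km")             # -> 0.001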
| 29
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 317
| 0
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 120
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 120
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
'BloomForCausalLM',
'BloomModel',
'BloomPreTrainedModel',
'BloomForSequenceClassification',
'BloomForTokenClassification',
'BloomForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 153
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 153
| 1
|
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Prepares both qubits in |1> and measures them."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
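# Expected behaviour (a sketch, not in the original file): both qubits are
# flipped to |1>, so every one of the 1000 shots measures "11" and the printed
# counts should be {'11': 1000}.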
| 720
|
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314245
RADIUS = 6_378_137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Distance in metres between two points on the Earth's surface
    (haversine formula on the WGS84 ellipsoid radii)."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
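# Usage sketch (not in the original file), San Francisco to Yosemite Valley:
#
#   haversine_distance(37.774856, -122.424227, 37.864742, -119.537521)
#   # -> roughly 254 km, returned in metres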
| 171
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        # 1. Input: fold the frame axis out of the batch so attention runs over time
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output: undo the reshapes and add the residual
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
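
# Minimal usage sketch (hypothetical sizes, assuming the reconstruction above):
#   model = TransformerTemporalModel(num_attention_heads=8, attention_head_dim=32, in_channels=256)
#   sample = torch.randn(2 * 16, 256, 8, 8)  # (batch * num_frames, channels, height, width)
#   out = model(sample, num_frames=16).sample  # output keeps the input shape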
| 13
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
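
# Minimal usage sketch (assuming a compatible checkpoint; "google/ddpm-cifar10-32"
# is one public example that can be loaded through this pipeline):
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]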
| 368
| 0
|
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,
    acceptor_conc: float,
    intrinsic_conc: float,
) -> float:
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
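
# Illustrative call (typical silicon values at T = 300 K; the result is only
# approximate). This evaluates kT/q * ln(Nd * Na / ni^2):
#   builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10)
# -> roughly 0.81 (volts)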
| 329
|
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
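
# Minimal usage sketch through the high-level API (the checkpoint name is one
# real public example, not the only option):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"])
# -> a list of {"score", "label"} dicts sorted by descending score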
| 329
| 1
|
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
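
# A few extra hand-checked cases (added as a sanity sketch; not in the original):
if __name__ == "__main__":
    assert is_palindrome(121)
    assert not is_palindrome(-121)  # negatives are rejected outright
    assert not is_palindrome(10)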
| 275
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]


if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
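
# Usage note (a sketch of the lazy-import behaviour set up above): importing
#   from transformers.models.audio_spectrogram_transformer import ASTConfig
# only loads the configuration submodule; the torch-backed ASTModel module is
# resolved by _LazyModule on first attribute access.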
| 275
| 1
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
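
# Minimal usage sketch ("openai/clip-vit-base-patch32" is one real public checkpoint):
#   from PIL import Image
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")
# -> a BatchEncoding with input_ids, attention_mask and pixel_values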
| 704
|
"""simple docstring"""
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is, as it's normally
# pytorch-version dependent. That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
__lowercase = float("""nan""")
class Tee:
    # A helper class to tee print's output into a file: sys.stdout = Tee(filename)
    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # enable to debug everything but the run itself, to do it fast and see the progress
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| 135
| 0
|
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True

    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")

    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
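
# A few extra hand-checked cases (added as a sanity sketch; not in the original):
if __name__ == "__main__":
    assert is_arithmetic_series([2, 4, 6])
    assert not is_arithmetic_series([2, 4, 7])
    assert arithmetic_mean([2, 4, 6]) == 4.0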
| 36
|
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 75
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["stage2", "stage3", "stage4"],
    )
    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True,
    )
    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.reduction.weight", F"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.weight", F"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.bias", F"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", F"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", F"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", F"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", F"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.weight", F"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.bias", F"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.weight", F"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.bias", F"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.weight", F"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", F"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", F"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", F"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", F"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", F"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", F"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.weight", F"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.weight", F"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.bias", F"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
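
# Example invocation (the script filename is hypothetical; the flags match the
# argparse definition above):
#   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large-converted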
| 140
|
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    # reduce the angle into [0, 2*pi) so the series converges quickly
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
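
# Sanity-check sketch: with the default 30 terms the series agrees with the
# math module to well below 1e-9 for any reduced angle, e.g.:
#   from math import sin, cos
#   abs(maclaurin_sin(10) - sin(10)) < 1e-9
#   abs(maclaurin_cos(5) - cos(5)) < 1e-9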
| 140
| 1
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processor_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
def lowercase__ (self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE : Dict = MgpstrProcessor(tokenizer=__UpperCAmelCase, image_processor=__UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : str = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=__UpperCAmelCase )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor, __UpperCAmelCase )
def lowercase__ (self : Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE : int = self.get_image_processor()
SCREAMING_SNAKE_CASE : Tuple = MgpstrProcessor(tokenizer=__UpperCAmelCase, image_processor=__UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Any = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE : Any = self.get_image_processor(do_normalize=__UpperCAmelCase, padding_value=1.0 )
SCREAMING_SNAKE_CASE : Optional[Any] = MgpstrProcessor.from_pretrained(
self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=__UpperCAmelCase, padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, __UpperCAmelCase )
def lowercase__ (self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE : str = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[int] = MgpstrProcessor(tokenizer=__UpperCAmelCase, image_processor=__UpperCAmelCase )
SCREAMING_SNAKE_CASE : List[Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : Tuple = image_processor(__UpperCAmelCase, return_tensors='''np''' )
SCREAMING_SNAKE_CASE : Dict = processor(images=__UpperCAmelCase, return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2 )
def lowercase__ (self : Dict ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.get_image_processor()
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[Any] = MgpstrProcessor(tokenizer=__UpperCAmelCase, image_processor=__UpperCAmelCase )
SCREAMING_SNAKE_CASE : int = '''test'''
SCREAMING_SNAKE_CASE : Tuple = processor(text=__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Any = tokenizer(__UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def lowercase__ (self : Any ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE : str = MgpstrProcessor(tokenizer=__UpperCAmelCase, image_processor=__UpperCAmelCase )
SCREAMING_SNAKE_CASE : int = '''test'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : Optional[int] = processor(text=__UpperCAmelCase, images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ), ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.char_decode(predictions)
        decoded_tok = tokenizer.batch_decode(predictions)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
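        # Hedged note (editorial addition, not from the original test): the three
        # random tensors above match MGP-STR's character (38), BPE/GPT-2 (50257)
        # and WordPiece/BERT (30522) vocabulary sizes; `batch_decode` is assumed
        # to decode each head and keep the best-scoring string per sample.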
| 507
|
"""
Project Euler problem 191: count "prize strings" of a given length, i.e.
attendance records with fewer than two total absences and never three
consecutive late days.
"""
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
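
    # Hedged sanity check (editorial addition): for a 4-day period, brute force
    # over all 3**4 attendance strings should agree with the memoized recursion.
    from itertools import product

    brute_force = sum(
        1
        for s in product("OLA", repeat=4)
        if s.count("A") < 2 and "LLL" not in "".join(s)
    )
    assert brute_force == solution(4) == 43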
| 507
| 1
|
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
    @require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
    @require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 702
|
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    # overlap will be of doc_stride - q_len
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice_ = input_ids[i:end_index]
            inputs.append(q_indices + slice_)
            category.append(answer["category"][0])
            if slice_[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]), add_special_tokens=False
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice_ = input_ids[i:end_index]
        inputs.append(q_indices + slice_)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice_[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
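

# Hedged illustration (editorial addition, never called by the script): lays out
# the strided windows produced above for small, assumed sizes q_len=10,
# max_length=100, doc_stride=80 over a 300-token document.
def _stride_windows_demo() -> None:
    q_len, max_length, doc_stride, n_tokens = 10, 100, 80, 300
    starts = list(range(q_len, n_tokens, max_length - doc_stride))  # step of 20
    windows = [(i, i + max_length - q_len) for i in starts]
    assert windows[0] == (10, 100) and windows[1] == (30, 120)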
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example, tokenizer, doc_stride=doc_stride, max_length=max_length, assertion=assertion
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"], labels["start_token"], labels["end_token"], labels["category"]
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60 % of the no-answer samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
_A : Union[str, Any] = load_dataset("""natural_questions""")
_A : Union[str, Any] = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""")
_A : Dict = data["""train""" if PROCESS_TRAIN == """true""" else """validation"""]
_A : int = {
"""tokenizer""": tokenizer,
"""doc_stride""": DOC_STRIDE,
"""max_length""": MAX_LENGTH,
"""assertion""": False,
}
_A : List[str] = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
_A : List[Any] = data.remove_columns(["""annotations""", """document""", """id""", """question"""])
print(data)
np.random.seed(SEED)
_A : str = """nq-training.jsonl""" if PROCESS_TRAIN == """true""" else """nq-validation.jsonl"""
save_to_disk(data, file_name=cache_file_name)
| 518
| 0
|
"""Fine-tune a pretrained Transformers model for audio classification."""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
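

# Hedged sketch (editorial addition, not called anywhere): illustrates the
# cropping arithmetic of `random_subsample` on a dummy waveform; the sizes are
# illustrative assumptions, not values used by the script.
def _random_subsample_demo() -> None:
    wav = np.zeros(32_000)  # 2 seconds of silence at 16 kHz
    crop = random_subsample(wav, max_length=1.0, sample_rate=16_000)
    assert len(crop) == 16_000  # exactly 1 second worth of samples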
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the evaluation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    # See all possible arguments in src/transformers/training_args.py, or by
    # passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed )
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Computes accuracy on a batch of predictions."""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 350
|
from math import log2


def lowest_set_bit_index(number: int) -> int:
    """Return the zero-based index of the lowest set bit of a non-negative int."""
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    return 0 if (number == 0) else int(log2(number & -number))
if __name__ == "__main__":
import doctest
doctest.testmod()
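    # Hedged worked example (editorial addition): 12 is 0b1100, so
    # 12 & -12 == 0b100 == 4 and log2(4) == 2, the index of its lowest set bit.
    assert lowest_set_bit_index(12) == 2
    assert lowest_set_bit_index(1) == 0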
| 276
| 0
|
"""simple docstring"""
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Extended Euclid's algorithm: find (x, y) such that a*x + b*y = gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Find x such that x = r1 (mod n1) and x = r2 (mod n2), for coprime n1, n2."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Find the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same result as above, computed via modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='chinese_remainder_theorem', verbose=True)
testmod(name='chinese_remainder_theorem2', verbose=True)
testmod(name='invert_modulo', verbose=True)
testmod(name='extended_euclid', verbose=True)
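    # Hedged worked example (editorial addition): x = 1 (mod 5) and x = 3 (mod 7)
    # has the unique solution x = 31 modulo 35, and both implementations agree.
    assert chinese_remainder_theorem(5, 1, 7, 3) == 31
    assert chinese_remainder_theorem2(5, 1, 7, 3) == 31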
| 430
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """Feature type for translations with a fixed set of languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    """Feature type for translations with a variable set of languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            extra = ", ".join(sorted(set(translation_dict) - lang_set))
            valid = ", ".join(lang_set)
            raise ValueError(f"Some languages in example ({extra}) are not in valid set ({valid}).")

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
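
# Hedged usage sketch (editorial addition, not part of the original module):
# `encode_example` normalizes a translation mapping into parallel,
# language-sorted tuples, matching the pyarrow struct returned by __call__.
if __name__ == "__main__":
    feature = TranslationVariableLanguages(languages=["en", "fr"])
    encoded = feature.encode_example({"fr": "le chat", "en": "the cat"})
    assert encoded == {"language": ("en", "fr"), "translation": ("the cat", "le chat")}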
| 430
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 36
|
"""Fetch and summarize book data from the Open Library API."""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Given an 'isbn/...' or other Open Library olid, return the book data as a dict."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()
def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [get_openlibrary_data(author["key"])["name"] for author in data["Authors"]]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
__A = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(F'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
__A = summarize_book(get_openlibrary_data(F'''isbn/{isbn}'''))
print("\n".join(F'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'''Sorry, there are no results for ISBN: {isbn}.''')
| 325
| 0
|
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the checkpoint's weights to the RobertaPreLayerNorm structure."""
    # convert configuration
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 715
|
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert the original Bort checkpoint (based on MXNET and Gluonnlp) to our BERT structure."""
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ The models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
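# A minimal sketch of driving the converter without the CLI; both paths below are
# illustrative placeholders, not files that ship with this script:
#
#     convert_bort_checkpoint_to_pytorch("./bort.params", "./bort-pytorch-dump")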
| 468
| 0
|
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
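# Quick sanity check of the shard arithmetic assumed above: 100 rows of 8-byte
# int64 ids packed into 16-byte shards gives 50 partitions.
assert (100 * 8) // 16 == 50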
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]  # Reverse the partitions.
    generate_fn = _generate_iterable_examples(df, partition_order)
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 498
|
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    """simple docstring"""

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(self, vocab_size=21128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, max_relative_position=64, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout=0.1, pad_token_id=0, bos_token_id=2, eos_token_id=3, use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
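# A minimal usage sketch (not part of the original module); commented out because
# this file uses relative imports and is not directly executable:
#
#     config = NezhaConfig(hidden_size=256, num_hidden_layers=4)
#     assert config.model_type == "nezha" and config.hidden_size == 256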
| 327
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "audio-spectrogram-transformer"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
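# A sketch of how these defaults tile a spectrogram into patches, assuming the
# usual strided-patch formula out = (size - patch) // stride + 1 (an assumption
# here, not taken from this file); commented out because of the relative imports:
#
#     cfg = ASTConfig()
#     freq_out = (cfg.num_mel_bins - cfg.patch_size) // cfg.frequency_stride + 1  # 12
#     time_out = (cfg.max_length - cfg.patch_size) // cfg.time_stride + 1         # 101
#     print(freq_out * time_out)  # 1212 patches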
| 714
|
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """simple docstring"""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Greedily take items in decreasing value/weight ratio.
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Only a fraction of the last item fits.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
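# Worked example of the greedy above: ratios are 6, 5, 4, so items 0 and 1 are
# taken whole and 20/30 of item 2 fits, giving 60 + 100 + 120 * (2/3) = 240.
if __name__ == "__main__":
    best, taken = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    print(best)   # 240.0
    print(taken)  # [1, 1, 0.6666666666666666]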
| 429
| 0
|
from __future__ import annotations
class XORCipher:
    """simple docstring"""

    def __init__(self, key: int = 0):
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 240
|
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
    """simple docstring"""
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(expected) == sorted(result)
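# Sanity note on the expected MST above: Kruskal's algorithm keeps the 8 cheapest
# edges that join all 9 nodes without a cycle, for a total weight of 37.
if __name__ == "__main__":
    mst = [[7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9]]
    print(sum(weight for _, _, weight in mst))  # 37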
| 240
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """simple docstring"""

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 50, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output, sigma_hat, sigma_prev, sample_hat, step_output.prev_sample, step_output["derivative"])
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
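# A hedged usage sketch (not part of the original file): the pipeline can be
# driven with a freshly initialized UNet; the size/channel choices below are
# illustrative, not a trained checkpoint. Commented out because this module
# relies on relative imports:
#
#     unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
#     pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
#     image = pipe(batch_size=1, num_inference_steps=2).images[0]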
| 648
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class RwkvConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(self, vocab_size=50277, context_length=1024, hidden_size=4096, num_hidden_layers=32, attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1e-5, bos_token_id=0, eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
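# A minimal usage sketch (not from the original module). The attribute_map above
# aliases max_position_embeddings to context_length, so both names should resolve
# to the same value; commented out because this file uses relative imports:
#
#     config = RwkvConfig(context_length=2048)
#     assert config.max_position_embeddings == 2048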
| 648
| 1
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
    def create_and_check_megatron_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_megatron_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_megatron_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels)
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_megatron_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_megatron_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_megatron_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_megatron_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    """simple docstring"""
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 106
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    '''simple docstring'''

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask)
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True)["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True)["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 266
| 0
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    '''simple docstring'''

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, q_groups=2, k_groups=2, v_groups=2, post_attention_groups=2, intermediate_groups=4, output_groups=1):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups)
    def create_and_check_squeezebert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_squeezebert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_squeezebert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_squeezebert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_squeezebert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_squeezebert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': SqueezeBertModel,
            '''fill-mask''': SqueezeBertForMaskedLM,
            '''question-answering''': SqueezeBertForQuestionAnswering,
            '''text-classification''': SqueezeBertForSequenceClassification,
            '''token-classification''': SqueezeBertForTokenClassification,
            '''zero-shot''': SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''')
        input_ids = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1E-4))
| 716
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    """Wraps a BridgeTower image processor and a Roberta tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
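# A minimal usage sketch (not from the source; the checkpoint name below is an
# assumption for illustration). One processor call returns both the text
# encodings and the image tensors, since it wraps a Roberta tokenizer and a
# BridgeTower image processor:
#
#   from PIL import Image
#   from transformers import BridgeTowerProcessor
#
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   encoding = processor(Image.open("cat.png"), "a photo of a cat", return_tensors="pt")
#   print(encoding.keys())  # input_ids, attention_mask, pixel_values, pixel_mask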
| 260
| 0
|
import numpy as np
class Cell:
    """A cell in the world: a position plus the A* bookkeeping values g, h and f."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        """Return the neighbour cells of `cell` that lie inside the world."""
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """A* search from `start` to `goal`, using squared Euclidean distance as heuristic."""
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))

        if current == goal:
            break

        for n in world.get_neighbours(current):
            # skip neighbours that were already expanded
            if n in _closed:
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            # skip neighbours already queued with a better score
            if any(c == n and c.f < n.f for c in _open):
                continue
            _open.append(n)

    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
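# Expected behaviour (a hand-derived sketch, not output captured from a run):
# with the default 5x5 world and diagonal moves allowed, the squared-distance
# heuristic typically drives the search straight down the diagonal, e.g.
#   path from (0, 0) to (4, 4)
#   [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]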
| 54
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)
@slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 588
| 0
|
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes
    def push(self, new_data):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping nodes
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        # swap the data of the two nodes
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
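# Expected output (derived by hand, not captured from a run): push() prepends,
# so the loop builds the list 1 2 3 4 5; swapping the *values* 1 and 4 gives:
#   1 2 3 4 5
#   After swapping
#   4 2 3 1 5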
| 245
|
'''simple docstring'''
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Get the ranges of shard indices per job."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split gen_kwargs into `max_num_jobs` gen_kwargs, sharding the lists."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Merge shards back into a single gen_kwargs, concatenating the lists."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Return a shuffled copy of gen_kwargs; lists of the same size get the same shuffling."""
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
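# A small worked example (hand-checked against the functions above):
#   _distribute_shards(num_shards=10, max_num_jobs=3)
#   -> [range(0, 4), range(4, 7), range(7, 10)]   # the first job takes the remainder
#   _split_gen_kwargs({"files": ["a", "b", "c"], "mode": "train"}, max_num_jobs=3)
#   -> [{'files': ['a'], 'mode': 'train'},
#       {'files': ['b'], 'mode': 'train'},
#       {'files': ['c'], 'mode': 'train'}]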
| 245
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
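# How this lazy-import pattern behaves at runtime (illustrative note, not part
# of the original module): importing the package only registers the import
# structure; the torch/tf submodules are loaded on first attribute access, e.g.
#   from transformers import XLMModel   # triggers the modeling_xlm import
# so environments without torch can still import the package, as long as they
# never touch a torch-backed symbol.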
| 105
|
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)


class _PatchedModuleObj:
    """Patched module so that it has the same attributes as the original module."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
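# A minimal usage sketch (the module and function names below are hypothetical
# stand-ins, not from the source):
#
#   import my_dataset_module
#
#   def mock_join(*parts):
#       return "/".join(parts)
#
#   with patch_submodule(my_dataset_module, "os.path.join", mock_join):
#       my_dataset_module.build_paths()   # sees the mocked join
#   # outside the block the original os.path.join is restored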
| 472
| 0
|
"""simple docstring"""
def get_data(source_data: list) -> list:
    """Transpose the rows of `source_data` into per-column lists of floats."""
    data_lists: list = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list, weights: list) -> list:
    """Score every column: weight 0 scores inversely, weight 1 scores directly."""
    score_lists: list = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list) -> list:
    """Add up each row's column scores into one final score per row."""
    final_scores: list = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list, weights: list) -> list:
    """weights: 0 == lowest is best, 1 == highest is best."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
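# Worked example (values hand-checked): with weights (0, 0, 1) the first two
# columns are scored inversely, the last one directly, and the three column
# scores are summed and appended to each row.
if __name__ == "__main__":
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    print(procentual_proximity(vehicles, [0, 0, 1]))
    # [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]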
| 715
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __snake_case( __A , unittest.TestCase ):
_A = DanceDiffusionPipeline
_A = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_A = PipelineTesterMixin.required_optional_params - {
'''callback''',
'''latents''',
'''callback_steps''',
'''output_type''',
'''num_images_per_prompt''',
}
_A = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_A = False
_A = False
def A ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16_000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=A_ , use_timestep_embedding=A_ , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
_SCREAMING_SNAKE_CASE = IPNDMScheduler()
_SCREAMING_SNAKE_CASE = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def A ( self , A_ , A_=0 ):
'''simple docstring'''
if str(A_ ).startswith('''mps''' ):
_SCREAMING_SNAKE_CASE = torch.manual_seed(A_ )
else:
_SCREAMING_SNAKE_CASE = torch.Generator(device=A_ ).manual_seed(A_ )
_SCREAMING_SNAKE_CASE = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 4,
}
return inputs
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE = self.get_dummy_components()
_SCREAMING_SNAKE_CASE = DanceDiffusionPipeline(**A_ )
_SCREAMING_SNAKE_CASE = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_SCREAMING_SNAKE_CASE = self.get_dummy_inputs(A_ )
_SCREAMING_SNAKE_CASE = pipe(**A_ )
_SCREAMING_SNAKE_CASE = output.audios
_SCREAMING_SNAKE_CASE = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_SCREAMING_SNAKE_CASE = np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def A ( self ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def A ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def A ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def A ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def A ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __snake_case( unittest.TestCase ):
def A ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = torch_device
_SCREAMING_SNAKE_CASE = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' )
_SCREAMING_SNAKE_CASE = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe(generator=A_ , num_inference_steps=100 , audio_length_in_s=4.096 )
_SCREAMING_SNAKE_CASE = output.audios
_SCREAMING_SNAKE_CASE = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_SCREAMING_SNAKE_CASE = np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = torch_device
_SCREAMING_SNAKE_CASE = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa )
_SCREAMING_SNAKE_CASE = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe(generator=A_ , num_inference_steps=100 , audio_length_in_s=4.096 )
_SCREAMING_SNAKE_CASE = output.audios
_SCREAMING_SNAKE_CASE = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_SCREAMING_SNAKE_CASE = np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 168
| 0
|
_snake_case = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
_snake_case = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
_snake_case = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
_snake_case = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
_snake_case = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
_snake_case = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
_snake_case = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
_snake_case = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 500
|
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
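# Hand-checked examples of the recurrence above:
#   minimum_squares_to_represent_a_number(12) -> 3   # 4 + 4 + 4
#   minimum_squares_to_represent_a_number(13) -> 2   # 4 + 9
#   minimum_squares_to_represent_a_number(21) -> 3   # 16 + 4 + 1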
| 500
| 1
|
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits in num!."""
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
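# Hand-checked example: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27,
# so solution(10) == 27.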
| 589
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 589
| 1
|
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( _A , unittest.TestCase):
"""simple docstring"""
a__ : Optional[Any] = AudioLDMPipeline
a__ : Optional[Any] = TEXT_TO_AUDIO_PARAMS
a__ : List[str] = TEXT_TO_AUDIO_BATCH_PARAMS
a__ : int = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
])
def snake_case_ ( self : Optional[Any] ) -> Dict:
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(32, 64) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=__lowerCAmelCase , )
_A = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , )
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
_A = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , projection_dim=32 , )
_A = ClapTextModelWithProjection(__lowerCAmelCase )
_A = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=77 )
_A = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_60_00 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__lowerCAmelCase , )
_A = SpeechTaHifiGan(__lowerCAmelCase )
_A = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''vocoder''': vocoder,
}
return components
def snake_case_ ( self : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple=0 ) -> Optional[Any]:
if str(__lowerCAmelCase ).startswith('''mps''' ):
_A = torch.manual_seed(__lowerCAmelCase )
else:
_A = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_A = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
}
return inputs
def snake_case_ ( self : List[str] ) -> int:
_A = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = AudioLDMPipeline(**__lowerCAmelCase )
_A = audioldm_pipe.to(__lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_A = self.get_dummy_inputs(__lowerCAmelCase )
_A = audioldm_pipe(**__lowerCAmelCase )
_A = output.audios[0]
assert audio.ndim == 1
assert len(__lowerCAmelCase ) == 2_56
_A = audio[:10]
_A = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
        text_input_ids = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_input_ids, )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
            text_input_ids = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_input_ids, )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
def snake_case_ ( self : Tuple ) -> Dict:
_A = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
_A = AudioLDMPipeline(**__lowerCAmelCase )
_A = audioldm_pipe.to(__lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_A = self.get_dummy_inputs(__lowerCAmelCase )
_A = '''egg cracking'''
_A = audioldm_pipe(**__lowerCAmelCase , negative_prompt=__lowerCAmelCase )
_A = output.audios[0]
assert audio.ndim == 1
assert len(__lowerCAmelCase ) == 2_56
_A = audio[:10]
_A = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def snake_case_ ( self : Union[str, Any] ) -> Optional[Any]:
_A = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
_A = AudioLDMPipeline(**__lowerCAmelCase )
_A = audioldm_pipe.to(__lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_A = '''A hammer hitting a wooden surface'''
# test num_waveforms_per_prompt=1 (default)
_A = audioldm_pipe(__lowerCAmelCase , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_56)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
_A = 2
_A = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_56)
# test num_waveforms_per_prompt for single prompt
_A = 2
_A = audioldm_pipe(__lowerCAmelCase , num_inference_steps=2 , num_waveforms_per_prompt=__lowerCAmelCase ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_56)
# test num_waveforms_per_prompt for batch of prompts
_A = 2
_A = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__lowerCAmelCase ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56)
def snake_case_ ( self : Tuple ) -> Any:
_A = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = AudioLDMPipeline(**__lowerCAmelCase )
_A = audioldm_pipe.to(__lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_A = audioldm_pipe.vocoder.config.sampling_rate
_A = self.get_dummy_inputs(__lowerCAmelCase )
_A = audioldm_pipe(audio_length_in_s=0.016 , **__lowerCAmelCase )
_A = output.audios[0]
assert audio.ndim == 1
assert len(__lowerCAmelCase ) / vocoder_sampling_rate == 0.016
_A = audioldm_pipe(audio_length_in_s=0.032 , **__lowerCAmelCase )
_A = output.audios[0]
assert audio.ndim == 1
assert len(__lowerCAmelCase ) / vocoder_sampling_rate == 0.032
def snake_case_ ( self : str ) -> List[Any]:
_A = self.get_dummy_components()
_A = AudioLDMPipeline(**__lowerCAmelCase )
_A = audioldm_pipe.to(__lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_A = ['''hey''']
_A = audioldm_pipe(__lowerCAmelCase , num_inference_steps=1 )
_A = output.audios.shape
assert audio_shape == (1, 2_56)
_A = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
_A = SpeechTaHifiGan(__lowerCAmelCase ).to(__lowerCAmelCase )
_A = audioldm_pipe(__lowerCAmelCase , num_inference_steps=1 )
_A = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_56)
def snake_case_ ( self : Optional[int] ) -> int:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__lowerCAmelCase )
def snake_case_ ( self : Dict ) -> Optional[Any]:
self._test_inference_batch_single_identical(test_mean_pixel_difference=__lowerCAmelCase )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def snake_case_ ( self : int ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__lowerCAmelCase )
@slow
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def snake_case_ ( self : Optional[int] ) -> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]="cpu" , __lowerCAmelCase : List[Any]=torch.floataa , __lowerCAmelCase : List[Any]=0 ) -> int:
_A = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_A = np.random.RandomState(__lowerCAmelCase ).standard_normal((1, 8, 1_28, 16) )
_A = torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase , dtype=__lowerCAmelCase )
_A = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 2.5,
}
return inputs
def snake_case_ ( self : Optional[Any] ) -> List[Any]:
_A = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
_A = audioldm_pipe.to(__lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_A = self.get_inputs(__lowerCAmelCase )
_A = 25
_A = audioldm_pipe(**__lowerCAmelCase ).audios[0]
assert audio.ndim == 1
assert len(__lowerCAmelCase ) == 8_19_20
_A = audio[7_72_30:7_72_40]
_A = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
_A = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def snake_case_ ( self : Union[str, Any] ) -> Optional[Any]:
_A = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
_A = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
_A = audioldm_pipe.to(__lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_A = self.get_inputs(__lowerCAmelCase )
_A = audioldm_pipe(**__lowerCAmelCase ).audios[0]
assert audio.ndim == 1
assert len(__lowerCAmelCase ) == 8_19_20
_A = audio[2_77_80:2_77_90]
_A = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
_A = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 2
|
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence of 0s, 1s and 2s in place in a single pass."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
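# Non-interactive example (hand-checked): the single left-to-right pass keeps
# everything below `low` red and everything above `high` blue:
#   dutch_national_flag_sort([2, 0, 1, 0, 2, 1]) -> [0, 0, 1, 1, 2, 2]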
| 2
| 1
|
"""simple docstring"""
def find_min(arr):
    """Partition `arr` into two subsets whose sums are as close as possible and
    return the minimum difference between the two subset sums."""
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    # a sum of 0 is always achievable (take no elements)
    for i in range(n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
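# Hand-checked example: for arr = [1, 6, 11, 5] the total is 23 and the best
# split is {1, 5, 6} vs {11} (12 vs 11), so find_min([1, 6, 11, 5]) == 1.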
| 710
|
"""simple docstring"""
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head mapping a hidden state to class logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
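# A minimal usage sketch (shapes are illustrative assumptions, not from the file):
#
#   import torch
#
#   head = ClassificationHead(class_size=5, embed_size=768)
#   hidden = torch.randn(2, 768)   # (batch, embed_size)
#   logits = head(hidden)          # torch.Size([2, 5])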
| 134
| 0
|
'''simple docstring'''
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
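# Hand-checked example: solving for the force between 3 C and 5 C at 2000 m,
#   coulombs_law(force=0, charge1=3, charge2=5, distance=2000)
#   == {"force": 8.988e9 * 15 / 2000**2} == {"force": 33705.0}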
| 325
|
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCAmelCase = get_logger(__name__)
class MockDownloadManager:
    """Mock the datasets DownloadManager so dataset scripts can be tested against local dummy data."""

    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(self, dataset_name, config, version, cache_dir=None, use_local_dummy_data=False, load_existing_dummy_data=True, download_callbacks=None, ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self._dummy_file is None:
lowercase = self.download_dummy_data()
return self._dummy_file
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowercase = cached_path(
snake_case , cache_dir=self.cache_dir , extract_compressed_file=snake_case , force_extract=snake_case )
return os.path.join(snake_case , self.dummy_file_name )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self._bucket_url is None:
lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def SCREAMING_SNAKE_CASE__ ( self ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowercase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowercase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(snake_case , snake_case ):
return self.create_dummy_data_dict(snake_case , snake_case )
elif isinstance(snake_case , (list, tuple) ):
return self.create_dummy_data_list(snake_case , snake_case )
else:
return self.create_dummy_data_single(snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case ):
return self.download_and_extract(snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
return self.download_and_extract(snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case , **snake_case ):
return path
def SCREAMING_SNAKE_CASE__ ( self ):
return {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(snake_case , snake_case ):
for single_url in single_urls:
download_callback(snake_case )
else:
lowercase = single_urls
download_callback(snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(snake_case , snake_case ):
lowercase = [os.path.join(snake_case , urllib.parse.quote_plus(Path(snake_case ).name ) ) for x in single_urls]
else:
lowercase = single_urls
lowercase = os.path.join(snake_case , urllib.parse.quote_plus(Path(snake_case ).name ) )
lowercase = value
# make sure that values are unique
if all(isinstance(snake_case , snake_case ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowercase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowercase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , snake_case ) ) for url in data_url )
lowercase = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowercase = [data_url[0]] * len(snake_case )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase = os.path.join(snake_case , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(snake_case )
return dummy_data_list
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
for download_callback in self.download_callbacks:
download_callback(snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase = os.path.join(snake_case , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(snake_case ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
def _iter_archive_members(snake_case ):
# this preserves the order of the members inside the ZIP archive
lowercase = Path(self.dummy_file ).parent
lowercase = path.relative_to(snake_case )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowercase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(snake_case )
lowercase = Path(snake_case )
lowercase = _iter_archive_members(snake_case ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(snake_case ).as_posix(), file_path.open('rb' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if not isinstance(snake_case , snake_case ):
lowercase = [paths]
for path in paths:
if os.path.isfile(snake_case ):
if os.path.basename(snake_case ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(snake_case ):
if os.path.basename(snake_case ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(snake_case ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(snake_case , snake_case )
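# A rough usage sketch (the dataset name and version below are hypothetical):
# the mock manager mirrors the DownloadManager API but resolves every URL to a
# file inside the dataset's dummy_data.zip, so loader scripts can be tested
# offline:
#
#   from datasets.utils.version import Version
#
#   dl_manager = MockDownloadManager("squad", None, Version("1.0.0"))
#   paths = dl_manager.download_and_extract({"train": "https://example.com/train.json"})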
| 84
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 136
|
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _a ( lowerCamelCase_ ):
factors = prime_factors(lowerCamelCase_ )
if is_square_free(factors ):
return -1 if len(factors ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
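# A minimal usage sketch of the Mobius function above (values are illustrative):
# _a(24) == 0 since 24 = 2**3 * 3 is not square-free
# _a(10) == 1 since 10 = 2 * 5 has an even number of prime factors
# _a(30) == -1 since 30 = 2 * 3 * 5 has an odd number of prime factors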
| 136
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor (SequenceFeatureExtractor ):
"""simple docstring"""
model_input_names = ['input_values', 'padding_mask']
def __init__( self , feature_size : int = 1 , sampling_rate : int = 2_40_00 , padding_value : float = 0.0 , chunk_length_s : float = None , overlap : float = None , **kwargs , ) -> None:
super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
self.chunk_length_s = chunk_length_s
self.overlap = overlap
@property
def chunk_length( self ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def chunk_stride( self ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
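# e.g. with chunk_length_s=1.0, overlap=0.25 and sampling_rate=24_000 the chunk is
# 24_000 samples long and the stride is int(0.75 * 24_000) = 18_000 samples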
def __call__( self , raw_audio , padding : Optional[Union[bool, str, PaddingStrategy]] = None , truncation : Optional[bool] = False , max_length : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , sampling_rate : Optional[int] = None , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if padding and truncation:
raise ValueError('Both padding and truncation were set. Make sure you only set one.' )
elif padding is None:
# by default let's pad the inputs
padding = True
is_batched = bool(
isinstance(raw_audio , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
raw_audio = [np.asarray(audio , dtype=np.float32 ).T for audio in raw_audio]
elif not is_batched and not isinstance(raw_audio , np.ndarray ):
raw_audio = np.asarray(raw_audio , dtype=np.float32 )
elif isinstance(raw_audio , np.ndarray ) and raw_audio.dtype is np.dtype(np.float64 ):
raw_audio = raw_audio.astype(np.float32 )
# always return batch
if not is_batched:
raw_audio = [np.asarray(raw_audio ).T]
# verify inputs are valid
for idx, example in enumerate(raw_audio ):
if example.ndim > 2:
raise ValueError(f'Expected input shape (channels, length) but got shape {example.shape}' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f'Expected mono audio but example has {example.shape[-1]} channels' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f'Expected stereo audio but example has {example.shape[-1]} channels' )
padded_inputs = None
input_values = BatchFeature({'input_values': raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
max_length = min(array.shape[0] for array in raw_audio )
nb_step = int(np.floor(max_length / self.chunk_stride ) )
max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
max_length = max(array.shape[0] for array in raw_audio )
nb_step = int(np.ceil(max_length / self.chunk_stride ) )
max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
padding = 'max_length'
else:
padded_inputs = input_values
# normal padding on batch
if padded_inputs is None:
padded_inputs = self.pad(
input_values , max_length=max_length , truncation=truncation , padding=padding , return_attention_mask=padding , )
if padding:
padded_inputs['padding_mask'] = padded_inputs.pop('attention_mask' )
input_values = []
for example in padded_inputs.pop('input_values' ):
if self.feature_size == 1:
example = example[..., None]
input_values.append(example.T )
padded_inputs['input_values'] = input_values
if return_tensors is not None:
padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
return padded_inputs
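# A minimal usage sketch (values and output shapes are assumptions, not from this file):
# import numpy as np
# extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
# audio = np.random.randn(24_000).astype(np.float32)  # one second of mono audio
# batch = extractor(audio, sampling_rate=24_000, return_tensors="np")
# batch["input_values"].shape  # expected (1, 1, 24_000): (batch, channels, samples)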
| 13
|
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester :
"""simple docstring"""
def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.0_2 , layer_norm_eps=1E-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2", "stage3"] , out_indices=[1, 2, 3] , ) -> Any:
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.patch_norm = patch_norm
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.is_training = is_training
self.scope = scope
self.use_labels = use_labels
self.type_sequence_label_size = type_sequence_label_size
self.encoder_stride = encoder_stride
self.out_features = out_features
self.out_indices = out_indices
def prepare_config_and_inputs( self ) -> Optional[Any]:
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
config = self.get_config()
return config, pixel_values, labels
def get_config( self ) -> Optional[int]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def create_and_check_model( self , config , pixel_values , labels ) -> int:
model = MaskFormerSwinModel(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
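# with the defaults above (image_size=32, patch_size=2, depths=[1, 2, 1]) this is
# ((32 // 2) ** 2) // 4 ** 2 = 16 patches and a hidden dim of 16 * 2 ** 2 = 64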
def create_and_check_backbone( self , config , pixel_values , labels ) -> Optional[Any]:
model = MaskFormerSwinBackbone(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(ValueError ):
config.out_features = ['stem']
model = MaskFormerSwinBackbone(config=config )
def prepare_config_and_inputs_for_common( self ) -> Union[str, Any]:
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
fx_compatible = False
test_torchscript = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
def setUp( self ) -> Tuple:
self.model_tester = MaskFormerSwinModelTester(self )
self.config_tester = ConfigTester(self , config_class=MaskFormerSwinConfig , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def lowercase_ ( self ) -> int:
pass
def test_config( self ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def create_and_test_config_common_properties( self ) -> Tuple:
return
def test_model( self ) -> Dict:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_backbone( self ) -> List[str]:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*config_and_inputs )
@unittest.skip('Swin does not use inputs_embeds' )
def lowercase_ ( self ) -> Optional[int]:
pass
@unittest.skip('Swin does not support feedforward chunking' )
def lowercase_ ( self ) -> Dict:
pass
def test_model_common_attributes( self ) -> Union[str, Any]:
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def test_forward_signature( self ) -> Optional[int]:
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['pixel_values']
self.assertListEqual(arg_names[:1] , expected_arg_names )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def lowercase_ ( self ) -> Any:
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def lowercase_ ( self ) -> List[Any]:
pass
def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ) -> List[str]:
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
hidden_states = outputs.hidden_states
expected_num_layers = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(hidden_states ) , expected_num_layers )
# Swin has a different seq_length
patch_size = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def test_hidden_states_output( self ) -> Tuple:
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
image_size = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
inputs_dict['output_hidden_states'] = True
self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
def test_hidden_states_output_with_padding( self ) -> Any:
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.patch_size = 3
image_size = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
patch_size = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
inputs_dict['output_hidden_states'] = True
self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def lowercase_ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def lowercase_ ( self ) -> Any:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def lowercase_ ( self ) -> Union[str, Any]:
pass
def test_model_outputs_equivalence( self ) -> Tuple:
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t ):
t[t != t] = 0
return t
def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
with torch.no_grad():
tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs )
dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()
def recursive_check(tuple_object , dict_object ):
if isinstance(tuple_object , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
recursive_check(tuple_iterable_value , dict_iterable_value )
elif isinstance(tuple_object , Dict ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(tuple_iterable_value , dict_iterable_value )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(tuple_object ) , set_nan_tensor_to_zero(dict_object ) , atol=1E-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'
f' {torch.isnan(tuple_object ).any()} and `inf`: {torch.isinf(tuple_object )}. Dict has'
f' `nan`: {torch.isnan(dict_object ).any()} and `inf`: {torch.isinf(dict_object )}.'
) , )
recursive_check(tuple_output , dict_output )
for model_class in self.all_model_classes:
model = model_class(config )
model.to(torch_device )
model.eval()
tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
dict_inputs = self._prepare_for_class(inputs_dict , model_class )
check_equivalence(model , tuple_inputs , dict_inputs )
tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
check_equivalence(model , tuple_inputs , dict_inputs )
tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
dict_inputs = self._prepare_for_class(inputs_dict , model_class )
check_equivalence(model , tuple_inputs , dict_inputs , {'output_hidden_states': True} )
tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
check_equivalence(model , tuple_inputs , dict_inputs , {'output_hidden_states': True} )
@require_torch
class MaskFormerSwinBackboneTest (unittest.TestCase , BackboneTesterMixin ):
"""simple docstring"""
all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
config_class = MaskFormerSwinConfig
def setUp( self ) -> Tuple:
self.model_tester = MaskFormerSwinModelTester(self )
def test_backbone_outputs( self ) -> Optional[Any]:
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
batch_size = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
backbone = backbone_class(config )
backbone.to(torch_device )
backbone.eval()
outputs = backbone(**inputs_dict )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , tuple )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
outputs = backbone(**inputs_dict , output_hidden_states=True )
self.assertIsNotNone(outputs.hidden_states )
self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
h_batch_size , _ , h_n_channels = hidden_state.shape
self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
outputs = backbone(**inputs_dict , output_attentions=True )
self.assertIsNotNone(outputs.attentions )
| 13
| 1
|
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def lowercase__ ( resistance : float , reactance : float , impedance : float ):
'''simple docstring'''
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance == 0:
return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
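# A minimal usage sketch (illustrative 3-4-5 values): exactly one argument is zero,
# and that quantity is solved for from the other two.
# lowercase__(resistance=3, reactance=4, impedance=0)  # -> {'impedance': 5.0}
# lowercase__(resistance=0, reactance=4, impedance=5)  # -> {'resistance': 3.0}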
| 715
|
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links( workflow_run_id , token=None ):
'''simple docstring'''
headers = None
if token is not None:
headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''}
url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
result = requests.get(url , headers=headers ).json()
job_links = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
pages_to_iterate_over = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(pages_to_iterate_over ):
result = requests.get(url + F'''&page={i + 2}''' , headers=headers ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def get_artifacts_links( workflow_run_id , token=None ):
'''simple docstring'''
headers = None
if token is not None:
headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''}
url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'''
result = requests.get(url , headers=headers ).json()
artifacts = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
pages_to_iterate_over = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(pages_to_iterate_over ):
result = requests.get(url + F'''&page={i + 2}''' , headers=headers ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def download_artifact( artifact_name , artifact_url , output_dir , token ):
'''simple docstring'''
headers = None
if token is not None:
headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''}
result = requests.get(artifact_url , headers=headers , allow_redirects=False )
download_url = result.headers["""Location"""]
response = requests.get(download_url , allow_redirects=True )
file_path = os.path.join(output_dir , F'''{artifact_name}.zip''' )
with open(file_path , """wb""" ) as fp:
fp.write(response.content )
def get_errors_from_single_artifact( artifact_zip_path , job_links=None ):
'''simple docstring'''
errors = []
failed_tests = []
job_name = None
with zipfile.ZipFile(artifact_zip_path ) as z:
for filename in z.namelist():
if not os.path.isdir(filename ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(filename ) as f:
for line in f:
line = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
error_line = line[: line.index(""": """ )]
error = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
test = line[len("""FAILED """ ) :]
failed_tests.append(test )
elif filename == "job_name.txt":
job_name = line
if len(errors ) != len(failed_tests ):
raise ValueError(
F'''`errors` and `failed_tests` should have the same number of elements. Got {len(errors )} for `errors` '''
F'''and {len(failed_tests )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
""" problem.""" )
job_link = None
if job_name and job_links:
job_link = job_links.get(job_name , None )
# A list with elements of the form (line of error, error, failed test)
result = [x + [y] + [job_link] for x, y in zip(errors , failed_tests )]
return result
def get_all_errors( artifact_dir , job_links=None ):
'''simple docstring'''
errors = []
paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(p , job_links=job_links ) )
return errors
def reduce_by_error( logs , error_filter=None ):
'''simple docstring'''
counter = Counter()
counter.update([x[1] for x in logs] )
counts = counter.most_common()
r = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
r[error] = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def get_model( test ):
'''simple docstring'''
test = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
return test.split("""/""" )[2]
return None
def reduce_by_model( logs , error_filter=None ):
'''simple docstring'''
logs = [(x[0], x[1], get_model(x[2] )) for x in logs]
logs = [x for x in logs if x[2] is not None]
tests = {x[2] for x in logs}
r = {}
for test in tests:
counter = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
counts = counter.most_common()
error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
n_errors = sum(error_counts.values() )
if n_errors > 0:
r[test] = {"""count""": n_errors, """errors""": error_counts}
r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def make_github_table( reduced_by_error ):
'''simple docstring'''
header = """| no. | error | status |"""
sep = """|-:|:-|:-|"""
lines = [header, sep]
for error in reduced_by_error:
count = reduced_by_error[error]["""count"""]
line = F'''| {count} | {error[:100]} | |'''
lines.append(line )
return "\n".join(lines )
def make_github_table_per_model( reduced_by_model ):
'''simple docstring'''
header = """| model | no. of errors | major error | count |"""
sep = """|-:|-:|-:|-:|"""
lines = [header, sep]
for model in reduced_by_model:
count = reduced_by_model[model]["""count"""]
error , _count = list(reduced_by_model[model]["""errors"""].items() )[0]
line = F'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(line )
return "\n".join(lines )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_job_links = get_job_links(args.workflow_run_id, token=args.token)
job_links = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
index = k.find(' / ')
k = k[index + len(' / ') :]
job_links[k] = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
errors = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
counter = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
most_common = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
reduced_by_error = reduce_by_error(errors)
reduced_by_model = reduce_by_model(errors)
sa = make_github_table(reduced_by_error)
sa_per_model = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa_per_model)
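# A hypothetical invocation (run id, paths and script name are placeholders),
# assuming a GitHub token with actions:read permission:
# python get_ci_error_statistics.py --workflow_run_id 123456789 --output_dir ci_errors --token $GITHUB_TOKEN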
| 339
| 0
|
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
'jukebox': 512,
}
class JukeboxTokenizer (PreTrainedTokenizer ):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_lyric_input_size = PRETRAINED_LYRIC_TOKENS_SIZES
model_input_names = ["input_ids", "attention_mask"]
def __init__( self , artists_file , genres_file , lyrics_file , version=["v3", "v2", "v2"] , max_n_lyric_tokens=512 , n_genres=5 , unk_token="<|endoftext|>" , **kwargs , ) ->Dict:
unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
super().__init__(
unk_token=unk_token , n_genres=n_genres , version=version , max_n_lyric_tokens=max_n_lyric_tokens , **kwargs , )
self.version = version
self.max_n_lyric_tokens = max_n_lyric_tokens
self.n_genres = n_genres
with open(artists_file , encoding="""utf-8""" ) as vocab_handle:
self.artists_encoder = json.load(vocab_handle )
with open(genres_file , encoding="""utf-8""" ) as vocab_handle:
self.genres_encoder = json.load(vocab_handle )
with open(lyrics_file , encoding="""utf-8""" ) as vocab_handle:
self.lyrics_encoder = json.load(vocab_handle )
oov = r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 79:
oov = oov.replace(r"""\-'""" , r"""\-+'""" )
self.out_of_vocab = regex.compile(oov )
self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
@property
def vocab_size( self ) ->Union[str, Any]:
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def get_vocab( self ) ->Union[str, Any]:
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def _convert_token_to_id( self , list_artists , list_genres , list_lyrics ) ->Any:
artists_id = [self.artists_encoder.get(artist , 0 ) for artist in list_artists]
for genres in range(len(list_genres ) ):
list_genres[genres] = [self.genres_encoder.get(genre , 0 ) for genre in list_genres[genres]]
list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
lyric_ids = [[self.lyrics_encoder.get(character , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def _tokenize( self , lyrics ) ->Tuple:
return list(lyrics )
def tokenize( self , artist , genre , lyrics , **kwargs ) ->List[Any]:
artist , genre , lyrics = self.prepare_for_tokenization(artist , genre , lyrics )
lyrics = self._tokenize(lyrics )
return artist, genre, lyrics
def prepare_for_tokenization( self , artists : str , genres : str , lyrics : str , is_split_into_words : bool = False ) ->Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
artists[idx] = artists[idx].lower()
genres[idx] = [genres[idx].lower()]
else:
artists[idx] = self._normalize(artists[idx] ) + """.v2"""
genres[idx] = [
self._normalize(genre ) + """.v2""" for genre in genres[idx].split("""_""" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
self.out_of_vocab = regex.compile(r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
vocab = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
self.vocab = {vocab[index]: index + 1 for index in range(len(vocab ) )}
self.vocab["""<unk>"""] = 0
self.n_vocab = len(vocab ) + 1
self.lyrics_encoder = self.vocab
self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
self.lyrics_decoder[0] = """"""
else:
self.out_of_vocab = regex.compile(r"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
lyrics = self._run_strip_accents(lyrics )
lyrics = lyrics.replace("""\\""" , """\n""" )
lyrics = self.out_of_vocab.sub("""""" , lyrics ), [], []
return artists, genres, lyrics
def _run_strip_accents( self , text ) ->Union[str, Any]:
text = unicodedata.normalize("""NFD""" , text )
output = []
for char in text:
cat = unicodedata.category(char )
if cat == "Mn":
continue
output.append(char )
return "".join(output )
def _normalize( self , text : str ) ->str:
accepted = (
[chr(i ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
+ [chr(i ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
+ [chr(i ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
+ ["""."""]
)
accepted = frozenset(accepted )
pattern = re.compile(r"""_+""" )
text = """""".join([c if c in accepted else """_""" for c in text.lower()] )
text = pattern.sub("""_""" , text ).strip("""_""" )
return text
def convert_lyric_tokens_to_string( self , lyrics ) ->str:
return " ".join(lyrics )
def convert_to_tensors( self , inputs , tensor_type : Optional[Union[str, TensorType]] = None , prepend_batch_axis : bool = False ) ->int:
# Convert to TensorType
if not isinstance(tensor_type , TensorType ):
tensor_type = TensorType(tensor_type )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
import tensorflow as tf
as_tensor = tf.constant
is_tensor = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
import torch
as_tensor = torch.tensor
is_tensor = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
import jax.numpy as jnp # noqa: F811
as_tensor = jnp.array
is_tensor = _is_jax
else:
as_tensor = np.asarray
is_tensor = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
inputs = [inputs]
if not is_tensor(inputs ):
inputs = as_tensor(inputs )
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
return inputs
def __call__( self : List[Any] , __a : Tuple , __a : List[str] , __a : str="" , __a : Optional[int]="pt" ) ->BatchEncoding:
lowerCamelCase_ : List[str] = [0, 0, 0]
lowerCamelCase_ : List[str] = [artist] * len(self.version )
lowerCamelCase_ : str = [genres] * len(self.version )
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : Optional[Any] = self.tokenize(__a , __a , __a )
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : List[str] = self._convert_token_to_id(__a , __a , __a )
lowerCamelCase_ : Dict = [-INFINITY] * len(full_tokens[-1] )
lowerCamelCase_ : Optional[int] = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=__a )
for i in range(len(self.version ) )
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) ->Tuple[str]:
if not os.path.isdir(save_directory ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
artists_file = os.path.join(
save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
with open(artists_file , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=False ) )
genres_file = os.path.join(
save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
with open(genres_file , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=False ) )
lyrics_file = os.path.join(
save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
with open(lyrics_file , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=False ) )
return (artists_file, genres_file, lyrics_file)
def _convert_id_to_token( self , artists_index , genres_index , lyric_index ) ->List[Any]:
artist = self.artists_decoder.get(artists_index )
genres = [self.genres_decoder.get(genre ) for genre in genres_index]
lyrics = [self.lyrics_decoder.get(character ) for character in lyric_index]
return artist, genres, lyrics
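# A minimal usage sketch (the checkpoint name is taken from the vocab URLs above;
# the exact outputs are assumptions):
# tokenizer = JukeboxTokenizer.from_pretrained("ArthurZ/jukebox")
# batch = tokenizer("Alan Jackson", "Country Rock", "old town road", return_tensors="pt")
# batch["input_ids"]  # one tensor per model version in `tokenizer.version` (v3, v2, v2)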
| 278
|
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '\nimport os\n'
IMPORT_IN_FUNCTION = '\ndef foo():\n import os\n return False\n'
DEEPLY_NESTED_IMPORT = '\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n'
TOP_LEVEL_TRY_IMPORT = '\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n'
TRY_IMPORT_IN_FUNCTION = '\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n'
MULTIPLE_EXCEPTS_IMPORT = '\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n'
EXCEPT_AS_IMPORT = '\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n'
GENERIC_EXCEPT_IMPORT = '\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n'
MULTILINE_TRY_IMPORT = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n'
MULTILINE_BOTH_IMPORT = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n'
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("""case""" , CASES )
def test_import_parsing( tmp_path : Dict , case : int ) -> List[Any]:
tmp_file_path = os.path.join(tmp_path , """test_file.py""" )
with open(tmp_file_path , """w""" ) as _tmp_file:
_tmp_file.write(case )
parsed_imports = get_imports(tmp_file_path )
assert parsed_imports == ["os"]
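# All of the cases above parse to ["os"]: get_imports ignores imports that are
# guarded by a try/except around the import (the `bar`/`baz` imports), while plain
# imports are reported regardless of how deeply they are nested.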
| 278
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor ( BaseImageProcessor ):
model_input_names = ['''pixel_values''']
def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BICUBIC , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = True , **kwargs , ):
'''simple docstring'''
super().__init__(**kwargs )
size = size if size is not None else {'shortest_edge': 224}
size = get_size_dict(size , default_to_square=False )
crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
crop_size = get_size_dict(crop_size , default_to_square=True , param_name='crop_size' )
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
self.do_convert_rgb = do_convert_rgb
def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
'''simple docstring'''
size = get_size_dict(size , default_to_square=False )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
output_size = get_resize_output_image_size(image , size=size['shortest_edge'] , default_to_square=False )
return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
'''simple docstring'''
size = get_size_dict(size )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )
def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
'''simple docstring'''
return rescale(image , scale=scale , data_format=data_format , **kwargs )
def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
'''simple docstring'''
return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : int = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Optional[ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
'''simple docstring'''
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size , param_name='size' , default_to_square=False )
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size , param_name='crop_size' , default_to_square=True )
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
images = make_list_of_images(images )
if not valid_images(images ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
images = [convert_to_rgb(image ) for image in images]
# All transformations expect numpy arrays.
images = [to_numpy_array(image ) for image in images]
if do_resize:
images = [self.resize(image=image , size=size , resample=resample ) for image in images]
if do_center_crop:
images = [self.center_crop(image=image , size=crop_size ) for image in images]
if do_rescale:
images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
if do_normalize:
images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
images = [to_channel_dimension_format(image , data_format ) for image in images]
data = {'pixel_values': images}
return BatchFeature(data=data , tensor_type=return_tensors )
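# A minimal usage sketch (shapes are assumptions based on the 224 defaults above):
# from PIL import Image
# import numpy as np
# processor = CLIPImageProcessor()
# image = Image.fromarray(np.zeros((300, 400, 3), dtype=np.uint8))
# out = processor(image, return_tensors="np")  # __call__ dispatches to preprocess
# out["pixel_values"].shape  # expected (1, 3, 224, 224) after resize + center crop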
| 700
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor ( ProcessorMixin ):
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'LayoutLMv3ImageProcessor'
tokenizer_class = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
'''simple docstring'''
feature_extractor = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , FutureWarning , )
feature_extractor = kwargs.pop('feature_extractor' )
image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(image_processor , tokenizer )
def __call__( self , images , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes : Union[List[List[int]], List[List[List[int]]]] = None , word_labels : Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
# first, apply the image processor
features = self.image_processor(images=images , return_tensors=return_tensors )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(text , str ):
text = [text] # add batch dimension (as the image processor always adds a batch dimension)
text_pair = features['words']
encoded_inputs = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
# add pixel values
images = features.pop('pixel_values' )
if return_overflowing_tokens is True:
images = self.get_overflowing_images(images , encoded_inputs['overflow_to_sample_mapping'] )
encoded_inputs['pixel_values'] = images
return encoded_inputs
def get_overflowing_images( self , images , overflow_to_sample_mapping ):
'''simple docstring'''
images_with_overflow = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
f''' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}''' )
return images_with_overflow
def batch_decode( self , *args , **kwargs ):
'''simple docstring'''
return self.tokenizer.batch_decode(*args , **kwargs )
def decode( self , *args , **kwargs ):
'''simple docstring'''
return self.tokenizer.decode(*args , **kwargs )
@property
def model_input_names( self ):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def feature_extractor_class( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
return self.image_processor_class
@property
def feature_extractor( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
return self.image_processor
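# A minimal usage sketch (the checkpoint name is an assumption, not from this file):
# processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
# encoding = processor(images, return_tensors="pt")  # with apply_ocr, words/boxes come from OCR
# encoding.keys()  # input_ids, attention_mask, bbox, pixel_values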
| 69
| 0
|
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args( ) -> Optional[Any]:
parser = argparse.ArgumentParser()
parser.add_argument('--model_ckpt' ,type=str ,default='microsoft/unixcoder-base-nine' )
parser.add_argument('--num_epochs' ,type=int ,default=5 )
parser.add_argument('--batch_size' ,type=int ,default=6 )
parser.add_argument('--gradient_accumulation_steps' ,type=int ,default=1 )
parser.add_argument('--freeze' ,type=bool ,default=True )
parser.add_argument('--learning_rate' ,type=float ,default=5E-4 )
parser.add_argument('--seed' ,type=int ,default=0 )
parser.add_argument('--lr_scheduler_type' ,type=str ,default='cosine' )
parser.add_argument('--num_warmup_steps' ,type=int ,default=10 )
parser.add_argument('--weight_decay' ,type=float ,default=0.01 )
parser.add_argument('--output_dir' ,type=str ,default='./results' )
return parser.parse_args()
_UpperCamelCase = load('accuracy')
def a_ ( _lowerCAmelCase ) -> List[str]:
__lowerCamelCase ,__lowerCamelCase : List[str] = eval_pred
__lowerCamelCase : List[Any] = np.argmax(_lowerCAmelCase ,axis=1 )
return metric.compute(predictions=_lowerCAmelCase ,references=_lowerCAmelCase )
class lowerCamelCase_ ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self : str , _a : List[Any] ) -> None:
super().__init__()
__lowerCamelCase : Union[str, Any] = trainer
def _lowercase ( self : Dict , _a : Tuple , _a : List[str] , _a : Optional[int] , **_a : Union[str, Any] ) -> Optional[Any]:
if control.should_evaluate:
__lowerCamelCase : List[Any] = deepcopy(_a )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='train' )
return control_copy
def a_ ( ) -> List[Any]:
__lowerCamelCase : Union[str, Any] = get_args()
set_seed(args.seed )
__lowerCamelCase : Tuple = load_dataset('codeparrot/codecomplex' ,split='train' )
__lowerCamelCase : Optional[int] = dataset.train_test_split(test_size=0.2 )
__lowerCamelCase : List[str] = train_test['test'].train_test_split(test_size=0.5 )
__lowerCamelCase : Dict = DatasetDict(
{
'train': train_test['train'],
'test': test_validation['train'],
'valid': test_validation['test'],
} )
print('Loading tokenizer and model' )
__lowerCamelCase : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
__lowerCamelCase : Tuple = tokenizer.eos_token
__lowerCamelCase : Dict = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt ,num_labels=7 )
__lowerCamelCase : List[str] = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
__lowerCamelCase : str = False
__lowerCamelCase : Optional[Any] = ClassLabel(num_classes=7 ,names=list(set(train_test_validation['train']['complexity'] ) ) )
def tokenize(_lowerCAmelCase ):
__lowerCamelCase : Union[str, Any] = tokenizer(example['src'] ,truncation=_lowerCAmelCase ,max_length=1024 )
        __lowerCamelCase : Union[str, Any] = labels.str2int(example['complexity'] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
__lowerCamelCase : Optional[int] = train_test_validation.map(
_lowerCAmelCase ,batched=_lowerCAmelCase ,remove_columns=train_test_validation['train'].column_names ,)
__lowerCamelCase : List[str] = DataCollatorWithPadding(tokenizer=_lowerCAmelCase )
__lowerCamelCase : Union[str, Any] = TrainingArguments(
output_dir=args.output_dir ,learning_rate=args.learning_rate ,lr_scheduler_type=args.lr_scheduler_type ,evaluation_strategy='epoch' ,save_strategy='epoch' ,logging_strategy='epoch' ,per_device_train_batch_size=args.batch_size ,per_device_eval_batch_size=args.batch_size ,num_train_epochs=args.num_epochs ,gradient_accumulation_steps=args.gradient_accumulation_steps ,weight_decay=0.01 ,metric_for_best_model='accuracy' ,run_name='complexity-java' ,report_to='wandb' ,)
__lowerCamelCase : Dict = Trainer(
model=_lowerCAmelCase ,args=_lowerCAmelCase ,train_dataset=tokenized_datasets['train'] ,eval_dataset=tokenized_datasets['valid'] ,tokenizer=_lowerCAmelCase ,data_collator=_lowerCAmelCase ,compute_metrics=_lowerCAmelCase ,)
print('Training...' )
trainer.add_callback(CustomCallback(_lowerCAmelCase ) )
trainer.train()
if __name__ == "__main__":
main()
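# Invocation sketch (editor's addition; the script file name is an assumption):
#   python train_complexity_predictor.py \
#       --model_ckpt microsoft/unixcoder-base-nine --num_epochs 5 --batch_size 6
# The flags mirror the argparse defaults defined in get_args above.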
| 459
|
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase ( __lowerCamelCase ):
UpperCamelCase_ : int = (DDPMParallelScheduler,)
def snake_case__ ( self :Any , **lowercase :str ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**lowercase )
return config
def snake_case__ ( self :Dict ) -> List[Any]:
"""simple docstring"""
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowercase )
def snake_case__ ( self :List[Any] ) -> Optional[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowercase , beta_end=lowercase )
def snake_case__ ( self :Any ) -> List[str]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase )
def snake_case__ ( self :Optional[int] ) -> List[str]:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowercase )
def snake_case__ ( self :Any ) -> List[Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase )
def snake_case__ ( self :str ) -> str:
"""simple docstring"""
self.check_over_configs(thresholding=lowercase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowercase , prediction_type=lowercase , sample_max_value=lowercase , )
def snake_case__ ( self :Union[str, Any] ) -> List[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase )
def snake_case__ ( self :List[str] ) -> Optional[int]:
"""simple docstring"""
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=lowercase )
def snake_case__ ( self :List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5
def snake_case__ ( self :Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**lowercase )
SCREAMING_SNAKE_CASE = len(lowercase )
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter
SCREAMING_SNAKE_CASE = self.dummy_sample_deter + 0.1
SCREAMING_SNAKE_CASE = self.dummy_sample_deter - 0.1
SCREAMING_SNAKE_CASE = samplea.shape[0]
SCREAMING_SNAKE_CASE = torch.stack([samplea, samplea, samplea] , dim=0 )
SCREAMING_SNAKE_CASE = torch.arange(lowercase )[0:3, None].repeat(1 , lowercase )
SCREAMING_SNAKE_CASE = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
SCREAMING_SNAKE_CASE = scheduler.batch_step_no_noise(lowercase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(lowercase ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 11_53.18_33 ) < 1e-2
assert abs(result_mean.item() - 0.50_05 ) < 1e-3
def snake_case__ ( self :str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**lowercase )
SCREAMING_SNAKE_CASE = len(lowercase )
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
for t in reversed(range(lowercase ) ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE = model(lowercase , lowercase )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample
SCREAMING_SNAKE_CASE = pred_prev_sample
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(lowercase ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 2_58.96_06 ) < 1e-2
assert abs(result_mean.item() - 0.33_72 ) < 1e-3
def snake_case__ ( self :int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE = scheduler_class(**lowercase )
SCREAMING_SNAKE_CASE = len(lowercase )
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
for t in reversed(range(lowercase ) ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE = model(lowercase , lowercase )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample
SCREAMING_SNAKE_CASE = pred_prev_sample
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(lowercase ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 2_02.02_96 ) < 1e-2
assert abs(result_mean.item() - 0.26_31 ) < 1e-3
def snake_case__ ( self :Optional[int] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**lowercase )
SCREAMING_SNAKE_CASE = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=lowercase )
SCREAMING_SNAKE_CASE = scheduler.timesteps
for i, timestep in enumerate(lowercase ):
if i == len(lowercase ) - 1:
SCREAMING_SNAKE_CASE = -1
else:
SCREAMING_SNAKE_CASE = timesteps[i + 1]
SCREAMING_SNAKE_CASE = scheduler.previous_timestep(lowercase )
SCREAMING_SNAKE_CASE = prev_t.item()
self.assertEqual(lowercase , lowercase )
def snake_case__ ( self :Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**lowercase )
SCREAMING_SNAKE_CASE = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(lowercase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=lowercase )
def snake_case__ ( self :Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**lowercase )
SCREAMING_SNAKE_CASE = [1_0_0, 8_7, 5_0, 1, 0]
SCREAMING_SNAKE_CASE = len(lowercase )
with self.assertRaises(lowercase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=lowercase , timesteps=lowercase )
def snake_case__ ( self :str ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**lowercase )
SCREAMING_SNAKE_CASE = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            lowercase , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=lowercase )
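# Editor's note: batch_step_no_noise (exercised in the batched test above) is the
# core of the "parallel" DDPM variant -- a stack of (sample, timestep) pairs from
# the same trajectory is flattened and denoised in a single call instead of one
# sequential step at a time.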
| 201
| 0
|
"""simple docstring"""
import random
def _partition(data: list, pivot) -> tuple:
    '''simple docstring'''
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items: list, index: int):
    '''simple docstring'''
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    less, equal, greater = _partition(items, pivot)
    m = len(less)
    count = len(equal)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(less, index)
    # must be in larger
    else:
        return quick_select(greater, index - (m + count))
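# Worked example (editor's addition): quick_select([7, 2, 9, 4, 1], 2) partitions
# the list around a random pivot and recurses into the side that contains index 2,
# returning 4 -- the third-smallest element -- in expected O(n) time; out-of-range
# indices return None.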
| 24
|
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = BarthezTokenizer
_UpperCAmelCase = BarthezTokenizerFast
_UpperCAmelCase = True
_UpperCAmelCase = True
def lowerCamelCase_ ( self ) -> Optional[int]:
super().setUp()
_UpperCAmelCase = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case )
_UpperCAmelCase = tokenizer
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(snake_case ) , 101122 )
def lowerCamelCase_ ( self ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_UpperCAmelCase = [0, 57, 3018, 70307, 91, 2]
_UpperCAmelCase = self.tokenizer(
snake_case , max_length=len(snake_case ) , padding=snake_case , truncation=snake_case , return_tensors='pt' )
self.assertIsInstance(snake_case , snake_case )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Optional[Any]:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = 'I was born in 92000, and this is falsé.'
_UpperCAmelCase = tokenizer.tokenize(snake_case )
_UpperCAmelCase = rust_tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
_UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case )
_UpperCAmelCase = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(snake_case )
_UpperCAmelCase = rust_tokenizer.encode(snake_case )
self.assertListEqual(snake_case , snake_case )
@slow
def lowerCamelCase_ ( self ) -> Optional[int]:
# fmt: off
_UpperCAmelCase = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_UpperCAmelCase = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=snake_case , )
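# Usage sketch (editor's addition): outside the test harness the same tokenizer
# round-trips as, for example,
#   tok = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez')
#   tok.decode(tok('Le transformeur est un modèle.').input_ids)
# The checkpoint name comes from setUp above; the sample sentence is illustrative.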
| 24
| 1
|
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __A:
def __init__( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : List[str]=3 , __UpperCamelCase : List[Any]=4 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : List[Any]=7 , __UpperCamelCase : Dict=True , __UpperCamelCase : List[str]=True , __UpperCamelCase : str=True , __UpperCamelCase : Tuple=True , __UpperCamelCase : int=9_9 , __UpperCamelCase : str=3_6 , __UpperCamelCase : Dict=2 , __UpperCamelCase : str=4 , __UpperCamelCase : Tuple=3_7 , __UpperCamelCase : List[str]="gelu" , __UpperCamelCase : int=0.1 , __UpperCamelCase : str=0.1 , __UpperCamelCase : Tuple=5_1_2 , __UpperCamelCase : Tuple=1_6 , __UpperCamelCase : str=2 , __UpperCamelCase : Optional[int]=0.02 , __UpperCamelCase : Tuple=6 , __UpperCamelCase : Dict=6 , __UpperCamelCase : Any=3 , __UpperCamelCase : str=4 , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : List[Any]=1_0_0_0 , ):
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = coordinate_size
lowerCamelCase_ = shape_size
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
lowerCamelCase_ = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowerCamelCase_ = text_seq_length
lowerCamelCase_ = (image_size // patch_size) ** 2 + 1
lowerCamelCase_ = self.text_seq_length + self.image_seq_length
def lowercase__ ( self : Optional[Any] ):
lowerCamelCase_ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
lowerCamelCase_ = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCamelCase_ = bbox[i, j, 3]
lowerCamelCase_ = bbox[i, j, 1]
lowerCamelCase_ = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCamelCase_ = bbox[i, j, 2]
lowerCamelCase_ = bbox[i, j, 0]
lowerCamelCase_ = tmp_coordinate
lowerCamelCase_ = tf.constant(__UpperCamelCase )
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.text_seq_length] )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
lowerCamelCase_ = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowercase__ ( self : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] ):
lowerCamelCase_ = TFLayoutLMvaModel(config=__UpperCamelCase )
# text + image
lowerCamelCase_ = model(__UpperCamelCase , pixel_values=__UpperCamelCase , training=__UpperCamelCase )
lowerCamelCase_ = model(
__UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , training=__UpperCamelCase , )
lowerCamelCase_ = model(__UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , training=__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowerCamelCase_ = model(__UpperCamelCase , training=__UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowerCamelCase_ = model({"""pixel_values""": pixel_values} , training=__UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowercase__ ( self : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : List[str] ):
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = TFLayoutLMvaForSequenceClassification(config=__UpperCamelCase )
lowerCamelCase_ = model(
__UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : Tuple ):
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = TFLayoutLMvaForTokenClassification(config=__UpperCamelCase )
lowerCamelCase_ = model(
__UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowercase__ ( self : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ):
lowerCamelCase_ = 2
lowerCamelCase_ = TFLayoutLMvaForQuestionAnswering(config=__UpperCamelCase )
lowerCamelCase_ = model(
__UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , training=__UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Any ):
lowerCamelCase_ = self.prepare_config_and_inputs()
((lowerCamelCase_) , (lowerCamelCase_) , (lowerCamelCase_) , (lowerCamelCase_) , (lowerCamelCase_) , (lowerCamelCase_) , (lowerCamelCase_) , (lowerCamelCase_)) = config_and_inputs
lowerCamelCase_ = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class __A( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def lowercase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
return True
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any]=False ):
lowerCamelCase_ = copy.deepcopy(__UpperCamelCase )
if model_class in get_values(__UpperCamelCase ):
lowerCamelCase_ = {
k: tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(__UpperCamelCase , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__UpperCamelCase ):
                lowerCamelCase_ = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(__UpperCamelCase ):
                lowerCamelCase_ = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
                lowerCamelCase_ = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(__UpperCamelCase ):
                lowerCamelCase_ = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(__UpperCamelCase ):
                lowerCamelCase_ = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )
return inputs_dict
def lowercase__ ( self : Union[str, Any] ):
lowerCamelCase_ = TFLayoutLMvaModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=3_7 )
def lowercase__ ( self : Dict ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Dict ):
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(__UpperCamelCase )
if getattr(__UpperCamelCase , """hf_compute_loss""" , __UpperCamelCase ):
# The number of elements in the loss should be the same as the number of elements in the label
lowerCamelCase_ = self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase )
lowerCamelCase_ = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__UpperCamelCase )[0]
]
lowerCamelCase_ = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
lowerCamelCase_ = self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase )
lowerCamelCase_ = prepared_for_class.pop("""input_ids""" )
lowerCamelCase_ = model(__UpperCamelCase , **__UpperCamelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
lowerCamelCase_ = self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase )
lowerCamelCase_ = prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
lowerCamelCase_ = prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
lowerCamelCase_ = -1_0_0
lowerCamelCase_ = tf.convert_to_tensor(__UpperCamelCase )
lowerCamelCase_ = model(__UpperCamelCase , **__UpperCamelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
lowerCamelCase_ = self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase )
lowerCamelCase_ = model(__UpperCamelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
lowerCamelCase_ = self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase )
# Get keys that were added with the _prepare_for_class function
lowerCamelCase_ = prepared_for_class.keys() - inputs_dict.keys()
lowerCamelCase_ = inspect.signature(model.call ).parameters
lowerCamelCase_ = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
lowerCamelCase_ = {0: """input_ids"""}
for label_key in label_keys:
lowerCamelCase_ = signature_names.index(__UpperCamelCase )
lowerCamelCase_ = label_key
lowerCamelCase_ = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
lowerCamelCase_ = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
lowerCamelCase_ = prepared_for_class[value]
lowerCamelCase_ = tuple(__UpperCamelCase )
# Send to model
lowerCamelCase_ = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def lowercase__ ( self : List[Any] ):
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : str ):
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase_ = type
self.model_tester.create_and_check_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : Union[str, Any] ):
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : List[str] ):
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : Union[str, Any] ):
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@slow
def lowercase__ ( self : int ):
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFLayoutLMvaModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def __lowerCAmelCase ( ) -> str:
lowerCamelCase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
class __A( unittest.TestCase ):
@cached_property
def lowercase__ ( self : int ):
return LayoutLMvaImageProcessor(apply_ocr=__UpperCamelCase ) if is_vision_available() else None
@slow
def lowercase__ ( self : Any ):
lowerCamelCase_ = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=__UpperCamelCase , return_tensors="""tf""" ).pixel_values
lowerCamelCase_ = tf.constant([[1, 2]] )
lowerCamelCase_ = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
lowerCamelCase_ = model(input_ids=__UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , training=__UpperCamelCase )
# verify the logits
lowerCamelCase_ = (1, 1_9_9, 7_6_8)
self.assertEqual(outputs.last_hidden_state.shape , __UpperCamelCase )
lowerCamelCase_ = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __UpperCamelCase , atol=1E-4 ) )
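# Editor's note on the expected shape above: assuming the image processor's default
# 224x224 resize with 16x16 patches, the sequence is 2 text tokens +
# (224 // 16) ** 2 = 196 image patches + 1 CLS token = 199 positions, hence the
# (1, 199, 768) hidden state.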
| 272
|
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
lowercase = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
lowercase = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
lowercase = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
lowercase = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
lowercase = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]),
('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
lowercase = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
lowercase = (
('''JH AH TH KH QH''', 2_3),
('''JH 9H TH KH QH''', 2_2),
('''JC KH JS JD JH''', 2_1),
('''KH KC 3S 3H 3D''', 2_0),
('''8C 9C 5C 3C TC''', 1_9),
('''JS QS 9H TS KH''', 1_8),
('''7C 7S KH 2H 7H''', 1_7),
('''3C KH 5D 5S KH''', 1_6),
('''QH 8H KD JH 8S''', 1_5),
('''2D 6D 9D TH 7D''', 1_4),
)
def __lowerCAmelCase ( ) -> List[str]:
lowerCamelCase_ , lowerCamelCase_ = randrange(len(UpperCAmelCase__ ) ), randrange(len(UpperCAmelCase__ ) )
lowerCamelCase_ = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
lowerCamelCase_ , lowerCamelCase_ = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __lowerCAmelCase ( UpperCAmelCase__ : int = 1_0_0 ) -> Optional[int]:
return (generate_random_hand() for _ in range(UpperCAmelCase__ ))
@pytest.mark.parametrize("""hand, expected""" , UpperCAmelCase__ )
def __lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) -> List[Any]:
assert PokerHand(UpperCAmelCase__ )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , UpperCAmelCase__ )
def __lowerCAmelCase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] ) -> int:
assert PokerHand(UpperCAmelCase__ )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , UpperCAmelCase__ )
def __lowerCAmelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] ) -> Union[str, Any]:
lowerCamelCase_ = PokerHand(UpperCAmelCase__ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , UpperCAmelCase__ )
def __lowerCAmelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict ) -> str:
assert PokerHand(UpperCAmelCase__ )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , UpperCAmelCase__ )
def __lowerCAmelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict ) -> Tuple:
assert PokerHand(UpperCAmelCase__ )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , UpperCAmelCase__ )
def __lowerCAmelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] ) -> Any:
assert PokerHand(UpperCAmelCase__ ).compare_with(PokerHand(UpperCAmelCase__ ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def __lowerCAmelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict ) -> List[str]:
assert PokerHand(UpperCAmelCase__ ).compare_with(PokerHand(UpperCAmelCase__ ) ) == expected
def __lowerCAmelCase ( ) -> Tuple:
lowerCamelCase_ = [PokerHand(UpperCAmelCase__ ) for hand in SORTED_HANDS]
lowerCamelCase_ = poker_hands.copy()
shuffle(UpperCAmelCase__ )
lowerCamelCase_ = chain(sorted(UpperCAmelCase__ ) )
for index, hand in enumerate(UpperCAmelCase__ ):
assert hand == poker_hands[index]
def __lowerCAmelCase ( ) -> List[Any]:
# Test that five high straights are compared correctly.
lowerCamelCase_ = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
pokerhands.sort(reverse=UpperCAmelCase__ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __lowerCAmelCase ( ) -> List[str]:
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
lowerCamelCase_ = PokerHand("""2C 4S AS 3D 5C""" )
lowerCamelCase_ = True
lowerCamelCase_ = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __lowerCAmelCase ( ) -> List[Any]:
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
lowerCamelCase_ = 0
lowerCamelCase_ = os.path.abspath(os.path.dirname(UpperCAmelCase__ ) )
lowerCamelCase_ = os.path.join(UpperCAmelCase__ , """poker_hands.txt""" )
with open(UpperCAmelCase__ ) as file_hand:
for line in file_hand:
lowerCamelCase_ = line[:1_4].strip()
lowerCamelCase_ = line[1_5:].strip()
lowerCamelCase_ , lowerCamelCase_ = PokerHand(UpperCAmelCase__ ), PokerHand(UpperCAmelCase__ )
lowerCamelCase_ = player.compare_with(UpperCAmelCase__ )
if output == "Win":
answer += 1
assert answer == 3_7_6
| 272
| 1
|
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
__a : Dict = logging.get_logger(__name__)
__a : Optional[int] = """Hello world! cécé herlolip"""
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
lowercase__ : Tuple = FairseqRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
roberta.eval() # disable dropout
lowercase__ : Dict = roberta.model.encoder.sentence_encoder
lowercase__ : Any = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=5_14 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
if classification_head:
lowercase__ : Any = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our RoBERTa config:" ,SCREAMING_SNAKE_CASE_ )
lowercase__ : Dict = XLMRobertaXLForSequenceClassification(SCREAMING_SNAKE_CASE_ ) if classification_head else XLMRobertaXLForMaskedLM(SCREAMING_SNAKE_CASE_ )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowercase__ : Tuple = roberta_sent_encoder.embed_tokens.weight
lowercase__ : Any = roberta_sent_encoder.embed_positions.weight
lowercase__ : List[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
lowercase__ : Tuple = roberta_sent_encoder.layer_norm.weight
lowercase__ : Optional[Any] = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowercase__ : Any = model.roberta.encoder.layer[i]
lowercase__ : Dict = roberta_sent_encoder.layers[i]
lowercase__ : int = layer.attention
lowercase__ : int = roberta_layer.self_attn_layer_norm.weight
lowercase__ : Dict = roberta_layer.self_attn_layer_norm.bias
# self attention
lowercase__ : List[str] = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
lowercase__ : Tuple = roberta_layer.self_attn.q_proj.weight
lowercase__ : List[Any] = roberta_layer.self_attn.q_proj.bias
lowercase__ : str = roberta_layer.self_attn.k_proj.weight
lowercase__ : Dict = roberta_layer.self_attn.k_proj.bias
lowercase__ : List[str] = roberta_layer.self_attn.v_proj.weight
lowercase__ : int = roberta_layer.self_attn.v_proj.bias
# self-attention output
lowercase__ : List[str] = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
lowercase__ : Optional[int] = roberta_layer.self_attn.out_proj.weight
lowercase__ : Dict = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
lowercase__ : Optional[Any] = roberta_layer.final_layer_norm.weight
lowercase__ : Optional[Any] = roberta_layer.final_layer_norm.bias
        # intermediate
        lowercase__ : Tuple = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        lowercase__ : Optional[Any] = roberta_layer.fc1.weight
        lowercase__ : str = roberta_layer.fc1.bias
        # output
        lowercase__ : Any = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        lowercase__ : Tuple = roberta_layer.fc2.weight
        lowercase__ : List[str] = roberta_layer.fc2.bias
# end of layer
if classification_head:
lowercase__ : List[Any] = roberta.model.classification_heads["mnli"].dense.weight
lowercase__ : List[str] = roberta.model.classification_heads["mnli"].dense.bias
lowercase__ : Union[str, Any] = roberta.model.classification_heads["mnli"].out_proj.weight
lowercase__ : Any = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
lowercase__ : Tuple = roberta.model.encoder.lm_head.dense.weight
lowercase__ : Tuple = roberta.model.encoder.lm_head.dense.bias
lowercase__ : List[str] = roberta.model.encoder.lm_head.layer_norm.weight
lowercase__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.bias
lowercase__ : Union[str, Any] = roberta.model.encoder.lm_head.weight
lowercase__ : Tuple = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowercase__ : Dict = roberta.encode(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ) # batch of size 1
lowercase__ : List[str] = model(SCREAMING_SNAKE_CASE_ )[0]
if classification_head:
lowercase__ : List[Any] = roberta.model.classification_heads["mnli"](roberta.extract_features(SCREAMING_SNAKE_CASE_ ) )
else:
lowercase__ : Optional[Any] = roberta.model(SCREAMING_SNAKE_CASE_ )[0]
print(our_output.shape ,their_output.shape )
lowercase__ : List[Any] = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
lowercase__ : Optional[Any] = torch.allclose(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,atol=1E-3 )
print("Do both models output the same tensors?" ,"🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
pathlib.Path(SCREAMING_SNAKE_CASE_ ).mkdir(parents=SCREAMING_SNAKE_CASE_ ,exist_ok=SCREAMING_SNAKE_CASE_ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
__a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__a : int = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
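# Invocation sketch (editor's addition; the script file name and both paths are
# placeholder assumptions):
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/xlmr.xl --pytorch_dump_folder_path ./xlm-roberta-xl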
| 701
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a : int = logging.get_logger(__name__)
__a : Tuple = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class UpperCAmelCase( snake_case_ ):
"""simple docstring"""
a : Optional[int] = """visual_bert"""
def __init__( self , lowerCamelCase=30522 , lowerCamelCase=768 , lowerCamelCase=512 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-12 , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=2 , **lowerCamelCase , ) -> str:
"""simple docstring"""
super().__init__(pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase )
lowercase__ : Optional[Any] = vocab_size
lowercase__ : Any = max_position_embeddings
lowercase__ : str = hidden_size
lowercase__ : Optional[int] = visual_embedding_dim
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[str] = hidden_act
lowercase__ : Optional[int] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : List[str] = initializer_range
lowercase__ : Tuple = type_vocab_size
lowercase__ : int = layer_norm_eps
lowercase__ : Union[str, Any] = bypass_transformer
lowercase__ : Dict = special_visual_initialize
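# Usage sketch (editor's addition): the config plugs into the VisualBERT models that
# the archive map above points at, e.g.
#   from transformers import VisualBertConfig, VisualBertModel
#   configuration = VisualBertConfig(visual_embedding_dim=512)
#   model = VisualBertModel(configuration)
# VisualBertModel is assumed from the transformers library; it is not defined here.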
| 298
| 0
|
'''simple docstring'''
import pprint
import requests
API_ENDPOINT_URL : str = '''https://zenquotes.io/api'''
def quote_of_the_day() -> list:
    '''simple docstring'''
    return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def random_quotes() -> list:
    '''simple docstring'''
    return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
    response : list = random_quotes()
    pprint.pprint(response)
| 24
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3
lowercase__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 508
| 0
|
from math import log2
def UpperCamelCase__ ( a ):
    if a < 0:
        raise ValueError("""Input value must be a positive integer""" )
    elif isinstance(a , float ):
        raise TypeError("""Input value must be a 'int' type""" )
    return 0 if (a == 0) else int(log2(a & -a ) )
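# Worked example (editor's addition): 36 is 0b100100, so 36 & -36 == 4 and
# log2(4) == 2.0 -- the function returns the zero-based position of the lowest
# set bit, with 0 returned for the input 0.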
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72
|
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def UpperCamelCase__ ( lowerCAmelCase__ ):
lowercase = [
"""decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase__ ,lowerCAmelCase__ )
def UpperCamelCase__ ( lowerCAmelCase__ ):
lowercase , lowercase = emb.weight.shape
lowercase = nn.Linear(lowerCAmelCase__ ,lowerCAmelCase__ ,bias=lowerCAmelCase__ )
lowercase = emb.weight.data
return lin_layer
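# Editor's note: the helper above rebuilds the output projection from the token
# embedding matrix as a bias-free nn.Linear whose weight is the embedding weight,
# so logits are computed as hidden_states @ embed_tokens.weight.T (weight tying).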
def UpperCamelCase__ ( lowerCAmelCase__ ):
lowercase = torch.load(lowerCAmelCase__ ,map_location="""cpu""" )
lowercase = Namespace(**checkpoint["""cfg"""]["""model"""] )
lowercase = checkpoint["""model"""]
remove_ignore_keys_(lowerCAmelCase__ )
lowercase = state_dict["""decoder.embed_tokens.weight"""].shape[0]
lowercase = {key.replace("""decoder""" ,"""model""" ): val for key, val in state_dict.items()}
lowercase = XGLMConfig(
vocab_size=lowerCAmelCase__ ,max_position_embeddings=args.max_target_positions ,num_layers=args.decoder_layers ,attention_heads=args.decoder_attention_heads ,ffn_dim=args.decoder_ffn_embed_dim ,d_model=args.decoder_embed_dim ,layerdrop=args.decoder_layerdrop ,dropout=args.dropout ,attention_dropout=args.attention_dropout ,activation_dropout=args.activation_dropout ,activation_function="""gelu""" ,scale_embedding=not args.no_scale_embedding ,tie_word_embeddings=args.share_decoder_input_output_embed ,)
lowercase = XGLMForCausalLM(lowerCAmelCase__ )
lowercase = model.load_state_dict(lowerCAmelCase__ ,strict=lowerCAmelCase__ )
print(lowerCAmelCase__ )
lowercase = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
__SCREAMING_SNAKE_CASE : Optional[Any] =parser.parse_args()
__SCREAMING_SNAKE_CASE : Optional[int] =convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 72
| 1
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : int = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
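# Note: the `_LazyModule` assignment above is the standard lazy-import pattern,
# so the heavy torch-backed submodules are only imported when one of the
# exported names (e.g. `InformerModel`) is first accessed.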
| 17
|
"""simple docstring"""
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Return the week-day name for a given date, using the Doomsday algorithm."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
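
# Illustrative check (date chosen here): 2000-01-01 fell on a Saturday.
assert get_week_day(2000, 1, 1) == "Saturday"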
| 516
| 0
|
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/m2m100_418M''': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    """Construct an M2M100 tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", pad_token="<pad>", unk_token="<unk>", language_codes="m2m100", sp_model_kwargs: Optional[Dict[str, Any]] = None, num_madeup_words=8, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs,
        )
        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        self.encoder_size = len(self.encoder)
        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)
        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
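
# Minimal usage sketch (the checkpoint name is the public facebook/m2m100_418M;
# the sentence is made up for illustration):
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   batch = tokenizer("Life is like a box of chocolates.", return_tensors="pt")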
| 296
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_led_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict["global_attention_mask"],
        )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: head masking is not yet tested here
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 296
| 1
|
"""simple docstring"""
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
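# Sanity check (a well-known value, independent of this snippet): C(10, 5) = 252.
assert binomial_coefficient(10, 5) == 252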
| 473
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 473
| 1
|
"""simple docstring"""
import functools
from typing import Any
def word_break(string: str, words: list) -> bool:
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")
    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")
    # Build trie
    trie: dict = {}
    word_keeper_key = "WORD_KEEPER"
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True
        return False

    return is_breakable(0)
if __name__ == "__main__":
import doctest
doctest.testmod()
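
# Illustration (classic examples, chosen here): "applepenapple" segments using
# ["apple", "pen"], while "catsandog" cannot with the dictionary below.
assert word_break("applepenapple", ["apple", "pen"]) is True
assert word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False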
| 248
|
"""simple docstring"""
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
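
# Worked example (input chosen here): justifying to a width of 16 characters,
# text_justification("This is an example of text justification.", 16)
# returns ['This    is    an', 'example  of text', 'justification.  '].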
| 248
| 1
|
'''simple docstring'''
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid: list[list[int]], init: list[int], goal: list[int], cost: int, heuristic: list[list[int]]) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
print("""ACTION MAP""")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
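# Note on the setup above: the heuristic is the Manhattan distance to the goal
# plus a large (99) penalty on obstacle cells, and every move costs 1, so the
# f = g + h expansion behaves like A* over the free cells.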
| 384
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 334
| 0
|
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Find the bounds and sum of the maximum subarray of arr[low..high]."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
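
# Illustration (array chosen here): for [-2, 1, -3, 4, -1, 2, 1, -5, 4] the
# maximum subarray is arr[3:7] = [4, -1, 2, 1] with sum 6, so
# max_subarray(arr, 0, 8) returns (3, 6, 6).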
| 716
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(self, vocab_size=30_522, max_position_embeddings=512, sinusoidal_pos_embds=False, n_layers=6, n_heads=12, dim=768, hidden_dim=4 * 768, dropout=0.1, attention_dropout=0.1, activation="gelu", initializer_range=0.02, qa_dropout=0.1, seq_classif_dropout=0.2, pad_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
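
# Illustrative use (the keyword values are just the defaults shown above):
#   config = DistilBertConfig(n_layers=6, n_heads=12, dim=768)
#   onnx_config = DistilBertOnnxConfig(config)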
| 376
| 0
|
"""simple docstring"""
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_000_000) -> int:
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
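# Worked example (independent of the solution): 585 is a decimal palindrome and
# its binary form 0b1001001001 is also palindromic, so 585 is counted in the sum.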
| 29
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29
| 1
|
def different_signs(num1: int, num2: int) -> bool:
    """Return True iff num1 and num2 have opposite signs."""
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
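
# Illustration (values chosen here): XOR leaves the sign bit set only when the
# operands' signs differ.
assert different_signs(7, -3) is True
assert different_signs(4, 9) is False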
| 705
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(self, parent, batch_size=13, image_size=[30, 30], patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, n_targets=8, num_detection_tokens=10):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(high=self.num_labels, size=(self.n_targets,), device=torch_device)
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None
    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
| 129
| 0
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """
    Data collator that dynamically pads the received inputs and samples the masked
    time indices needed for self-supervised pretraining.
    """

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])

        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )

            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )

            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch
class Wav2Vec2PreTrainer(Trainer):
    """
    Subclassed Trainer that decays the gumbel softmax temperature during training.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'``"
        )

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
main()
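# Example invocation of this pretraining script (the model/dataset ids and
# split names below are illustrative placeholders, not tested here):
# python run_pretrain.py --output_dir ./wav2vec2-pretrained \
#     --model_name_or_path patrickvonplaten/wav2vec2-base \
#     --dataset_name librispeech_asr --dataset_config_name clean \
#     --train_split_name train.100 --do_train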
| 146
|
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 146
| 1
|
import itertools
import math
def is_prime(number: int) -> bool:
    """Checks whether `number` is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Generates the sequence of prime numbers."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Returns the nth prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, None))
if __name__ == "__main__":
print(f'''{solution() = }''')
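    # Illustrative sanity check (not in the original file): the 6k +/- 1 trial
    # division above is valid because every prime p > 3 satisfies p % 6 in (1, 5),
    # so the loop only needs to test candidate divisors of that form.
    assert all(p % 6 in (1, 5) for p in (5, 7, 11, 13, 10007))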
| 73
|
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph,
    v,
    visited_forward,
    visited_backward,
    cst_fwd,
    cst_bwd,
    queue,
    parent,
    shortest_distance,
):
    """Relax the edges of `v` in one search direction and update the best meeting distance."""
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source, destination, graph_forward, graph_backward):
    """Bi-directional Dijkstra: runs one search from the source and one from the
    destination and stops when the two frontiers meet."""
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
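    # Illustrative check (not in the original file): with the sample graphs
    # above, the cheapest E -> F route is E -> G -> F with total cost 3.
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # expected: 3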
| 73
| 1
|
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
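# Minimal standalone usage sketch (not part of the tests; the model id matches
# the integration test above, the step count is arbitrary):
# unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
# pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
# images = pipe(num_inference_steps=50, output_type="numpy").images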
| 351
|
def xnor_gate(input_1: int, input_2: int) -> int:
    """Calculate XNOR of the two inputs: 1 when the inputs match, otherwise 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
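    # Illustrative equivalence (not in the original file): for 0/1 inputs,
    # XNOR is exactly the negation of XOR.
    assert all(xnor_gate(a, b) == 1 - (a ^ b) for a in (0, 1) for b in (0, 1))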
| 151
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 78
|
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
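# Hedged usage sketch (not in this file; the model id is illustrative): in
# practice this class is reached through `OwlViTProcessor.from_pretrained`:
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")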
| 78
| 1
|
import re
def indian_phone_validator(phone: str) -> bool:
    """Determine whether the given string is a valid Indian phone number."""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
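    # Additional illustrative checks (not in the original file): a 10-digit
    # number starting with 7/8/9 validates; other leading digits do not.
    print(indian_phone_validator("9876543210"))  # True
    print(indian_phone_validator("+91 1234567890"))  # False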
| 670
|
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's Law: given two known values among voltage, current and
    resistance (the unknown passed as 0), return the missing one as a
    name/value pair."""
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
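    # Illustrative use (values chosen arbitrarily, not from the original file):
    # the zero-valued quantity is solved for, here R = V / I = 5.0 ohms.
    print(ohms_law(voltage=10, current=2, resistance=0))  # {'resistance': 5.0}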
| 670
| 1
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args):
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None):
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
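# Hedged usage sketch (shapes are illustrative, not from this file): project
# 32 features to Student-T parameters and build a batch of distributions.
# dist_output = StudentTOutput(dim=1)
# projection = dist_output.get_parameter_projection(in_features=32)
# distribution = dist_output.distribution(projection(torch.randn(8, 32)))
# sample = distribution.sample()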
| 715
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"
class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(self, fo: str, mode: str = "rb", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, block_size: int = DEFAULT_BLOCK_SIZE, **kwargs):
        super().__init__(
            fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
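# Hedged usage sketch (URL is a placeholder): once these filesystems are
# registered by `datasets`, chained fsspec URLs like the following resolve to
# the decompressed single file inside the archive:
# import fsspec
# with fsspec.open("gzip://data.txt::https://example.com/data.txt.gz") as f:
#     text = f.read()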
| 49
| 0
|
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
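    # Example invocation (all paths below are placeholders):
    # python convert_yoso_checkpoint.py --pytorch_model_path ./yoso.ckpt \
    #     --config_file ./yoso_config.json --pytorch_dump_path ./yoso-converted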
| 699
|
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum sum of a contiguous subarray of `arr` (Kadane's algorithm)."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"{max_subarray_sum(nums) = }")
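    # Illustrative edge case (not in the original file): with an all-negative
    # input the `allow_empty_subarrays` flag changes the answer.
    print(max_subarray_sum([-3, -1, -2]))  # -1, best non-empty subarray
    print(max_subarray_sum([-3, -1, -2], allow_empty_subarrays=True))  # 0, the empty subarray wins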
| 431
| 0
|
def solution():
    """Returns the number of Sundays that fell on the first of the month
    during the twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
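    # For reference: this is Project Euler problem 19; the expected output is 171.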
| 701
|
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """K-Means clustering with (legacy TF1-style) TensorFlow ops."""
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
| 675
| 0
|
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class _UpperCamelCase :
'''simple docstring'''
def __init__( self ):
__lowerCAmelCase = psutil.Process()
__lowerCAmelCase = False
def snake_case ( self ):
__lowerCAmelCase = -1
while True:
__lowerCAmelCase = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def snake_case ( self ):
__lowerCAmelCase = True
__lowerCAmelCase = threading.Thread(target=self.peak_monitor )
__lowerCAmelCase = True
self.thread.start()
def snake_case ( self ):
__lowerCAmelCase = False
self.thread.join()
return self.cpu_memory_peak
A : Optional[int] = PeakCPUMemory()
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = {"time": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
__lowerCAmelCase = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
__lowerCAmelCase = torch.cuda.memory_allocated(_UpperCamelCase )
torch.cuda.reset_peak_memory_stats()
return measures
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = {"time": time.time() - start_measures["time"]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
__lowerCAmelCase = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
__lowerCAmelCase = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
__lowerCAmelCase = (torch.cuda.memory_allocated(_UpperCamelCase ) - start_measures[str(_UpperCamelCase )]) / 2**20
__lowerCAmelCase = (torch.cuda.max_memory_allocated(_UpperCamelCase ) - start_measures[str(_UpperCamelCase )]) / 2**20
return measures
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
print(f"{description}:" )
print(f"- Time: {measures['time']:.2f}s" )
for i in range(torch.cuda.device_count() ):
print(f"- GPU {i} allocated: {measures[str(_UpperCamelCase )]:.2f}MiB" )
__lowerCAmelCase = measures[f"{i}-peak"]
print(f"- GPU {i} peak: {peak:.2f}MiB" )
print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB" )
print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB" )
| 636
|
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
A : Dict = get_logger(__name__)
A : Dict = R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class _UpperCamelCase :
'''simple docstring'''
@add_start_docstrings(__a )
def __call__( self , __a , __a ):
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class _UpperCamelCase :
'''simple docstring'''
@add_start_docstrings(__a )
def __call__( self , __a , __a ):
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
@add_start_docstrings(__a )
def __call__( self , __a , __a , __a , **__a ):
for processor in self:
__lowerCAmelCase = inspect.signature(processor.__call__ ).parameters
if len(__a ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f"Make sure that all the required parameters: {list(function_args.keys() )} for "
f"{processor.__class__} are passed to the logits processor." )
__lowerCAmelCase = processor(__a , __a , __a , **__a )
else:
__lowerCAmelCase = processor(__a , __a , __a )
return scores
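# Temperature warper: rescales logits by 1 / temperature; appears to
# correspond to transformers' FlaxTemperatureLogitsWarper.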
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a ):
if not isinstance(__a , __a ) or not (temperature > 0):
raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}" )
__lowerCAmelCase = temperature
def __call__( self , __a , __a , __a ):
__lowerCAmelCase = scores / self.temperature
return scores
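# Nucleus (top-p) warper: keeps the smallest set of tokens whose cumulative
# probability exceeds `top_p` and masks the rest with `filter_value`;
# appears to correspond to transformers' FlaxTopPLogitsWarper.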
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a , __a = -float("Inf" ) , __a = 1 ):
if not isinstance(__a , __a ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}" )
if not isinstance(__a , __a ) or (min_tokens_to_keep < 1):
raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}" )
__lowerCAmelCase = top_p
__lowerCAmelCase = filter_value
__lowerCAmelCase = min_tokens_to_keep
def __call__( self , __a , __a , __a ):
__lowerCAmelCase , __lowerCAmelCase = lax.top_k(__a , scores.shape[-1] )
__lowerCAmelCase = jnp.full_like(__a , self.filter_value )
__lowerCAmelCase = jax.nn.softmax(__a , axis=-1 ).cumsum(axis=-1 )
__lowerCAmelCase = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
__lowerCAmelCase = jnp.roll(__a , 1 )
score_mask |= score_mask.at[:, 0].set(__a )
# min tokens to keep
__lowerCAmelCase = score_mask.at[:, : self.min_tokens_to_keep].set(__a )
__lowerCAmelCase = jnp.where(__a , __a , __a )
__lowerCAmelCase = jax.lax.sort_key_val(__a , __a )[-1]
return next_scores
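# Top-k warper: keeps only the `top_k` highest-scoring tokens per row and
# masks everything else with `filter_value`; appears to correspond to
# transformers' FlaxTopKLogitsWarper.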
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a , __a = -float("Inf" ) , __a = 1 ):
if not isinstance(__a , __a ) or top_k <= 0:
raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}" )
__lowerCAmelCase = max(__a , __a )
__lowerCAmelCase = filter_value
def __call__( self , __a , __a , __a ):
__lowerCAmelCase , __lowerCAmelCase = scores.shape
__lowerCAmelCase = jnp.full(batch_size * vocab_size , self.filter_value )
__lowerCAmelCase = min(self.top_k , scores.shape[-1] ) # Safety check
__lowerCAmelCase , __lowerCAmelCase = lax.top_k(__a , __a )
__lowerCAmelCase = jnp.broadcast_to((jnp.arange(__a ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
__lowerCAmelCase = topk_scores.flatten()
__lowerCAmelCase = topk_indices.flatten() + shift
__lowerCAmelCase = next_scores_flat.at[topk_indices_flat].set(__a )
__lowerCAmelCase = next_scores_flat.reshape(__a , __a )
return next_scores
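# Forces the BOS token at the first generation step by setting every other
# score to -inf; appears to correspond to transformers'
# FlaxForcedBOSTokenLogitsProcessor.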
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a ):
__lowerCAmelCase = bos_token_id
def __call__( self , __a , __a , __a ):
__lowerCAmelCase = jnp.full(scores.shape , -float("inf" ) )
__lowerCAmelCase = 1 - jnp.bool_(cur_len - 1 )
__lowerCAmelCase = jnp.where(__a , new_scores.at[:, self.bos_token_id].set(0 ) , __a )
return scores
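# Forces the EOS token once `cur_len` reaches `max_length - 1`; appears to
# correspond to transformers' FlaxForcedEOSTokenLogitsProcessor.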
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a , __a ):
__lowerCAmelCase = max_length
__lowerCAmelCase = eos_token_id
def __call__( self , __a , __a , __a ):
__lowerCAmelCase = jnp.full(scores.shape , -float("inf" ) )
__lowerCAmelCase = 1 - jnp.bool_(cur_len - self.max_length + 1 )
__lowerCAmelCase = jnp.where(__a , new_scores.at[:, self.eos_token_id].set(0 ) , __a )
return scores
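# Blocks EOS until at least `min_length` tokens have been generated; appears
# to correspond to transformers' FlaxMinLengthLogitsProcessor.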
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a , __a ):
if not isinstance(__a , __a ) or min_length < 0:
raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}" )
if not isinstance(__a , __a ) or eos_token_id < 0:
raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}" )
__lowerCAmelCase = min_length
__lowerCAmelCase = eos_token_id
def __call__( self , __a , __a , __a ):
# create boolean flag to decide if min length penalty should be applied
__lowerCAmelCase = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
__lowerCAmelCase = jnp.where(__a , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , __a )
return scores
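# Suppresses the given tokens while generation is still at `begin_index`;
# appears to correspond to transformers'
# FlaxSuppressTokensAtBeginLogitsProcessor.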
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a , __a ):
__lowerCAmelCase = list(__a )
__lowerCAmelCase = begin_index
def __call__( self , __a , __a , __a ):
__lowerCAmelCase = 1 - jnp.bool_(cur_len - self.begin_index )
__lowerCAmelCase = jnp.where(__a , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , __a )
return scores
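# Unconditionally suppresses the given tokens at every step; appears to
# correspond to transformers' FlaxSuppressTokensLogitsProcessor.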
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a ):
__lowerCAmelCase = list(__a )
def __call__( self , __a , __a , __a ):
__lowerCAmelCase = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
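# Forces specific tokens at specific generation indices via a fixed-size
# lookup array (XLA-friendly); appears to correspond to transformers'
# FlaxForceTokensLogitsProcessor.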
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a ):
__lowerCAmelCase = dict(__a )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
__lowerCAmelCase = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
__lowerCAmelCase = force_token_array.at[index].set(__a )
__lowerCAmelCase = jnp.intaa(__a )
def __call__( self , __a , __a , __a ):
def _force_token(__a ):
__lowerCAmelCase = scores.shape[0]
__lowerCAmelCase = self.force_token_array[generation_idx]
__lowerCAmelCase = jnp.ones_like(__a , dtype=scores.dtype ) * -float("inf" )
__lowerCAmelCase = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
__lowerCAmelCase = lax.dynamic_update_slice(__a , __a , (0, current_token) )
return new_scores
__lowerCAmelCase = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(__a ) , lambda: scores , ) , )
return scores
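# Whisper timestamp rules: timestamp tokens must come in pairs, cannot
# decrease, and are forced whenever their summed probability outweighs
# every text token; appears to correspond to transformers'
# FlaxWhisperTimeStampLogitsProcessor.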
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a , __a , __a ):
__lowerCAmelCase = generate_config.eos_token_id
__lowerCAmelCase = generate_config.no_timestamps_token_id
__lowerCAmelCase = generate_config.no_timestamps_token_id + 1
__lowerCAmelCase = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(__a , "max_initial_timestamp_index" ):
__lowerCAmelCase = generate_config.max_initial_timestamp_index
else:
__lowerCAmelCase = model_config.vocab_size
if self.max_initial_timestamp_index is None:
__lowerCAmelCase = model_config.vocab_size
def __call__( self , __a , __a , __a ):
# suppress <|notimestamps|> which is handled by without_timestamps
__lowerCAmelCase = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(__a , __a ):
__lowerCAmelCase = jnp.where((cur_len - self.begin_index) >= 1 , __a , __a )
__lowerCAmelCase = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , __a , )
__lowerCAmelCase = jnp.where((cur_len - self.begin_index) < 2 , __a , __a )
__lowerCAmelCase = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , __a , __a , )
return jnp.where(
__a , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , __a , )
__lowerCAmelCase = jax.vmap(__a )(__a , __a )
__lowerCAmelCase = jnp.where(cur_len == self.begin_index , __a , __a )
__lowerCAmelCase = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , __a , )
__lowerCAmelCase = self.timestamp_begin + self.max_initial_timestamp_index
__lowerCAmelCase = jnp.where(
__a , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , __a , )
# if sum of probability over timestamps is above any other token, sample timestamp
__lowerCAmelCase = jax.nn.log_softmax(__a , axis=-1 )
def handle_cumulative_probs(__a , __a ):
__lowerCAmelCase = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
__lowerCAmelCase = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , __a , )
__lowerCAmelCase = jax.vmap(__a )(__a , __a )
return scores
| 636
| 1
|
"""simple docstring"""
def lowerCAmelCase ( __UpperCamelCase = 50 ):
'''simple docstring'''
UpperCAmelCase__ : str = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
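# The triple loop above appears to implement Project Euler problem 116:
# counting the ways to replace black tiles in a row of `length` units with
# coloured tiles of length 2, 3 or 4 (one colour per row, at least one
# tile); column `tile_length - 2` accumulates the count per tile size.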
if __name__ == "__main__":
print(F"{solution() = }")
| 194
|
"""simple docstring"""
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : str = []
if len(__UpperCamelCase ) == 1:
return [nums.copy()]
for _ in range(len(__UpperCamelCase ) ):
UpperCAmelCase__ : Tuple = nums.pop(0 )
UpperCAmelCase__ : List[str] = permute(__UpperCamelCase )
for perm in permutations:
perm.append(__UpperCamelCase )
result.extend(__UpperCamelCase )
nums.append(__UpperCamelCase )
return result
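# Second approach below: generates the permutations in place via
# backtracking, swapping each candidate element into position `start`
# and recursing.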
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
def backtrack(__UpperCamelCase ):
if start == len(__UpperCamelCase ) - 1:
output.append(nums[:] )
else:
for i in range(__UpperCamelCase , len(__UpperCamelCase ) ):
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = nums[i], nums[start]
backtrack(start + 1 )
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = nums[i], nums[start] # backtrack
UpperCAmelCase__ : List[str] = []
backtrack(0 )
return output
if __name__ == "__main__":
import doctest
    # use res to print the data from the permute2 (backtracking) function
__UpperCAmelCase = permutea([1, 2, 3])
print(res)
doctest.testmod()
| 194
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __snake_case , __snake_case=13 , __snake_case=3 , __snake_case=224 , __snake_case=30 , __snake_case=400 , __snake_case=True , __snake_case=None , __snake_case=True , __snake_case=[0.5, 0.5, 0.5] , __snake_case=[0.5, 0.5, 0.5] , ):
_SCREAMING_SNAKE_CASE : Dict = size if size is not None else {"""height""": 18, """width""": 18}
_SCREAMING_SNAKE_CASE : Optional[Any] = parent
_SCREAMING_SNAKE_CASE : Tuple = batch_size
_SCREAMING_SNAKE_CASE : List[str] = num_channels
_SCREAMING_SNAKE_CASE : List[str] = image_size
_SCREAMING_SNAKE_CASE : Dict = min_resolution
_SCREAMING_SNAKE_CASE : str = max_resolution
_SCREAMING_SNAKE_CASE : Dict = do_resize
_SCREAMING_SNAKE_CASE : Tuple = size
_SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize
_SCREAMING_SNAKE_CASE : str = image_mean
_SCREAMING_SNAKE_CASE : Tuple = image_std
def UpperCAmelCase_ ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase__ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
A_ : Any = ViTImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Tuple = EfficientFormerImageProcessorTester(self )
@property
def UpperCAmelCase_ ( self ):
return self.image_proc_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , """image_mean""" ) )
self.assertTrue(hasattr(__snake_case , """image_std""" ) )
self.assertTrue(hasattr(__snake_case , """do_normalize""" ) )
self.assertTrue(hasattr(__snake_case , """do_resize""" ) )
self.assertTrue(hasattr(__snake_case , """size""" ) )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
# Initialize image_processor
_SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE : Tuple = prepare_image_inputs(self.image_proc_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
_SCREAMING_SNAKE_CASE : List[str] = image_processor(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processor
_SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE : List[Any] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
_SCREAMING_SNAKE_CASE : Any = image_processor(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processor
_SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE : Tuple = prepare_image_inputs(self.image_proc_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
_SCREAMING_SNAKE_CASE : Dict = image_processor(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 533
|
'''simple docstring'''
from __future__ import annotations
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE__ ) <= 1 or n <= 1:
return
insert_next(SCREAMING_SNAKE_CASE__ , n - 1 )
rec_insertion_sort(SCREAMING_SNAKE_CASE__ , n - 1 )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
if index >= len(SCREAMING_SNAKE_CASE__ ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = (
collection[index],
collection[index - 1],
)
insert_next(SCREAMING_SNAKE_CASE__ , index + 1 )
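# Worked example: rec_insertion_sort([3, 1, 2], 3) leaves the tail [1, 2]
# untouched, then insert_next bubbles the 3 rightwards, yielding [1, 2, 3]
# in place.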
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = input('Enter integers separated by spaces: ')
UpperCAmelCase_ : list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 533
| 1
|
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( UpperCamelCase_ ):
"""simple docstring"""
snake_case = (EulerDiscreteScheduler,)
snake_case = 10
def lowerCamelCase__ ( self : Any , **_snake_case : List[str] ) -> Any:
"""simple docstring"""
A_ = {
'num_train_timesteps': 1_100,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
}
config.update(**__a )
return config
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__a )
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=__a , beta_end=__a )
def lowerCamelCase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a )
def lowerCamelCase__ ( self : str ) -> List[str]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
def lowerCamelCase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
A_ = torch.manual_seed(0 )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter * scheduler.init_noise_sigma
A_ = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
A_ = scheduler.scale_model_input(__a , __a )
A_ = model(__a , __a )
A_ = scheduler.step(__a , __a , __a , generator=__a )
A_ = output.prev_sample
A_ = torch.sum(torch.abs(__a ) )
A_ = torch.mean(torch.abs(__a ) )
assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3
def lowerCamelCase__ ( self : List[str] ) -> int:
"""simple docstring"""
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(prediction_type="v_prediction" )
A_ = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
A_ = torch.manual_seed(0 )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter * scheduler.init_noise_sigma
A_ = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
A_ = scheduler.scale_model_input(__a , __a )
A_ = model(__a , __a )
A_ = scheduler.step(__a , __a , __a , generator=__a )
A_ = output.prev_sample
A_ = torch.sum(torch.abs(__a ) )
A_ = torch.mean(torch.abs(__a ) )
assert abs(result_sum.item() - 0.0_0_0_2 ) < 1e-2
assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3
def lowerCamelCase__ ( self : Dict ) -> Any:
"""simple docstring"""
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps , device=__a )
A_ = torch.manual_seed(0 )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
A_ = sample.to(__a )
for t in scheduler.timesteps:
A_ = scheduler.scale_model_input(__a , __a )
A_ = model(__a , __a )
A_ = scheduler.step(__a , __a , __a , generator=__a )
A_ = output.prev_sample
A_ = torch.sum(torch.abs(__a ) )
A_ = torch.mean(torch.abs(__a ) )
assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**__a , use_karras_sigmas=__a )
scheduler.set_timesteps(self.num_inference_steps , device=__a )
A_ = torch.manual_seed(0 )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
A_ = sample.to(__a )
for t in scheduler.timesteps:
A_ = scheduler.scale_model_input(__a , __a )
A_ = model(__a , __a )
A_ = scheduler.step(__a , __a , __a , generator=__a )
A_ = output.prev_sample
A_ = torch.sum(torch.abs(__a ) )
A_ = torch.mean(torch.abs(__a ) )
assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9 ) < 1e-2
assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3 ) < 1e-3
| 717
|
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = TransfoXLTokenizer
snake_case = False
snake_case = False
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
super().setUp()
A_ = [
"<unk>",
"[CLS]",
"[SEP]",
"want",
"unwanted",
"wa",
"un",
"running",
",",
"low",
"l",
]
A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def lowerCamelCase__ ( self : str , **_snake_case : Any ) -> Optional[Any]:
"""simple docstring"""
A_ = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **_snake_case )
def lowerCamelCase__ ( self : int , _snake_case : Optional[Any] ) -> Any:
"""simple docstring"""
A_ = "<unk> UNwanted , running"
A_ = "<unk> unwanted, running"
return input_text, output_text
def lowerCamelCase__ ( self : Dict ) -> int:
"""simple docstring"""
A_ = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=_snake_case )
A_ = tokenizer.tokenize("<unk> UNwanted , running" )
self.assertListEqual(_snake_case , ["<unk>", "unwanted", ",", "running"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , [0, 4, 8, 7] )
def lowerCamelCase__ ( self : List[str] ) -> int:
"""simple docstring"""
A_ = TransfoXLTokenizer(lower_case=_snake_case )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ) , ["hello", "!", "how", "are", "you", "?"] )
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
A_ = TransfoXLTokenizer(lower_case=_snake_case )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def lowerCamelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
A_ = TransfoXLTokenizer(lower_case=_snake_case )
A_ = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
A_ = [
"Hello",
"(",
"bracket",
")",
"and",
"side",
"@-@",
"scrolled",
"[",
"and",
"]",
"Henry",
"'s",
"$",
"5",
"@,@",
"000",
"with",
"3",
"@.@",
"34",
"m",
".",
"What",
"'s",
"up",
"!",
"?",
]
self.assertListEqual(tokenizer.tokenize(_snake_case ) , _snake_case )
self.assertEqual(tokenizer.convert_tokens_to_string(_snake_case ) , _snake_case )
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
A_ = self.get_tokenizer()
A_ = len(_snake_case )
tokenizer.add_tokens(["new1", "new2"] )
tokenizer.move_added_token("new1" , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(_snake_case ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("new1" ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , "new1" )
| 482
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
class snake_case_ ( A__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_: List[Any] = ["""input_features""", """attention_mask"""]
def __init__( self , __a=80 , __a=1_6000 , __a=80 , __a=0.0 , __a=True , __a=True , __a=True , **__a , ):
"""simple docstring"""
super().__init__(feature_size=a__ , sampling_rate=a__ , padding_value=a__ , **a__ )
A__ = num_mel_bins
A__ = do_ceptral_normalize
A__ = normalize_means
A__ = normalize_vars
A__ = True
def _UpperCAmelCase ( self , __a , ):
"""simple docstring"""
A__ = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
A__ = torch.from_numpy(a__ ).unsqueeze(0 )
A__ = ta_kaldi.fbank(a__ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
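    # Utterance-level cepstral mean and variance normalization (CMVN): the
    # static helper below subtracts the per-utterance mean and divides by
    # the per-utterance standard deviation over the first `input_length`
    # frames, resetting padded frames to `padding_value`.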
@staticmethod
def _UpperCAmelCase ( __a , __a , __a = True , __a = True , __a = 0.0 , ):
"""simple docstring"""
if normalize_means:
A__ = x[:input_length].mean(axis=0 )
A__ = np.subtract(a__ , a__ )
if normalize_vars:
A__ = x[:input_length].std(axis=0 )
A__ = np.divide(a__ , a__ )
if input_length < x.shape[0]:
A__ = padding_value
# make sure array is in float32
A__ = x.astype(np.floataa )
return x
def _UpperCAmelCase ( self , __a , __a = None ):
"""simple docstring"""
A__ = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(a__ , a__ , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(a__ , a__ )
]
def __call__( self , __a , __a = False , __a = None , __a = False , __a = None , __a = None , __a = None , __a = None , **__a , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
A__ = isinstance(a__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
A__ = is_batched_numpy or (
isinstance(a__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A__ = [np.asarray(a__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(a__ , np.ndarray ):
A__ = np.asarray(a__ , dtype=np.floataa )
elif isinstance(a__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A__ = [raw_speech]
# extract fbank features
A__ = [self._extract_fbank_features(a__ ) for waveform in raw_speech]
# convert into correct format for padding
A__ = BatchFeature({'input_features': features} )
A__ = self.pad(
a__ , padding=a__ , max_length=a__ , truncation=a__ , pad_to_multiple_of=a__ , return_attention_mask=a__ , **a__ , )
# make sure list is in array format
A__ = padded_inputs.get('input_features' )
if isinstance(input_features[0] , a__ ):
A__ = [np.asarray(a__ , dtype=np.floataa ) for feature in input_features]
A__ = padded_inputs.get('attention_mask' )
if attention_mask is not None:
A__ = [np.asarray(a__ , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
A__ = (
np.array(a__ , dtype=np.intaa )
if self._get_padding_strategies(a__ , max_length=a__ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
A__ = self.normalize(
padded_inputs['input_features'] , attention_mask=a__ )
if return_tensors is not None:
A__ = padded_inputs.convert_to_tensors(a__ )
return padded_inputs
| 260
|
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _UpperCAmelCase ( A__ , A__ , A__ ):
UpperCamelCase__ = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self , a__ , a__ , a__ = None , a__ = 5_0_2_5_7 , a__ = 1_0_2_4 , a__ = 7_6_8 , a__ = 1_2 , a__ = 1_2 , a__ = None , a__ = "gelu_new" , a__ = 0.1 , a__ = 0.1 , a__ = 0.1 , a__ = 1e-5 , a__ = 0.0_2 , a__ = True , a__ = True , a__ = False , a__ = False , ):
super().__init__()
A__ = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
F" `n_embd`: {n_embd} are not equal.")
A__ = prefix_inner_dim
A__ = prefix_hidden_dim
A__ = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim)
if self.prefix_hidden_dim is not None
else nn.Identity()
)
A__ = (
nn.Linear(self.prefix_hidden_dim , a__) if self.prefix_hidden_dim is not None else nn.Identity()
)
A__ = GPTaConfig(
vocab_size=a__ , n_positions=a__ , n_embd=a__ , n_layer=a__ , n_head=a__ , n_inner=a__ , activation_function=a__ , resid_pdrop=a__ , embd_pdrop=a__ , attn_pdrop=a__ , layer_norm_epsilon=a__ , initializer_range=a__ , scale_attn_weights=a__ , use_cache=a__ , scale_attn_by_inverse_layer_idx=a__ , reorder_and_upcast_attn=a__ , )
A__ = GPTaLMHeadModel(a__)
def snake_case_ ( self , a__ , a__ , a__ = None , a__ = None , ):
A__ = self.transformer.transformer.wte(a__)
A__ = self.encode_prefix(a__)
A__ = self.decode_prefix(a__)
A__ = torch.cat((prefix_embeds, embedding_text) , dim=1)
if labels is not None:
A__ = self.get_dummy_token(input_ids.shape[0] , input_ids.device)
A__ = torch.cat((dummy_token, input_ids) , dim=1)
A__ = self.transformer(inputs_embeds=a__ , labels=a__ , attention_mask=a__)
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def snake_case_ ( self , a__ , a__):
return torch.zeros(a__ , self.prefix_length , dtype=torch.intaa , device=a__)
def snake_case_ ( self , a__):
return self.encode_prefix(a__)
@torch.no_grad()
def snake_case_ ( self , a__ , a__ , a__):
A__ = torch.split(a__ , 1 , dim=0)
A__ = []
A__ = []
for feature in features:
A__ = self.decode_prefix(feature.to(a__)) # back to the clip feature
# Only support beam search for now
A__ , A__ = self.generate_beam(
input_embeds=a__ , device=a__ , eos_token_id=a__)
generated_tokens.append(output_tokens[0])
generated_seq_lengths.append(seq_lengths[0])
A__ = torch.stack(a__)
A__ = torch.stack(a__)
return generated_tokens, generated_seq_lengths
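    # Beam search decoding over the GPT-2 head: the method below keeps the
    # top `beam_size` partial sequences (default 5), extends each with its
    # highest-scoring next tokens, tracks per-beam lengths for length
    # normalization, and stops once every beam has emitted the EOS token.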
@torch.no_grad()
def snake_case_ ( self , a__=None , a__=None , a__=None , a__ = 5 , a__ = 6_7 , a__ = 1.0 , a__ = None , ):
A__ = eos_token_id
A__ = None
A__ = None
A__ = torch.ones(a__ , device=a__ , dtype=torch.int)
A__ = torch.zeros(a__ , device=a__ , dtype=torch.bool)
if input_embeds is not None:
A__ = input_embeds
else:
A__ = self.transformer.transformer.wte(a__)
for i in range(a__):
A__ = self.transformer(inputs_embeds=a__)
A__ = outputs.logits
A__ = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
A__ = logits.softmax(-1).log()
if scores is None:
A__ , A__ = logits.topk(a__ , -1)
A__ = generated.expand(a__ , *generated.shape[1:])
A__ , A__ = next_tokens.permute(1 , 0), scores.squeeze(0)
if tokens is None:
A__ = next_tokens
else:
A__ = tokens.expand(a__ , *tokens.shape[1:])
A__ = torch.cat((tokens, next_tokens) , dim=1)
else:
A__ = -float(np.inf)
A__ = 0
A__ = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
A__ = scores_sum / seq_lengths[:, None]
A__ , A__ = scores_sum_average.view(-1).topk(a__ , -1)
A__ = next_tokens // scores_sum.shape[1]
A__ = seq_lengths[next_tokens_source]
A__ = next_tokens % scores_sum.shape[1]
A__ = next_tokens.unsqueeze(1)
A__ = tokens[next_tokens_source]
A__ = torch.cat((tokens, next_tokens) , dim=1)
A__ = generated[next_tokens_source]
A__ = scores_sum_average * seq_lengths
A__ = is_stopped[next_tokens_source]
A__ = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0] , 1 , -1)
A__ = torch.cat((generated, next_token_embed) , dim=1)
A__ = is_stopped + next_tokens.eq(a__).squeeze()
if is_stopped.all():
break
A__ = scores / seq_lengths
A__ = scores.argsort(descending=a__)
# tokens tensors are already padded to max_seq_length
A__ = [tokens[i] for i in order]
A__ = torch.stack(a__ , dim=0)
A__ = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype)
return output_texts, seq_lengths
| 632
| 0
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class _a ( unittest.TestCase):
"""simple docstring"""
def lowercase__ ( self : str )->Any:
_UpperCAmelCase = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
_UpperCAmelCase = get_activation('''gelu''' )
self.assertTrue(torch.allclose(gelu_python(UpperCamelCase_ ) , torch_builtin(UpperCamelCase_ ) ) )
self.assertFalse(torch.allclose(gelu_python(UpperCamelCase_ ) , gelu_new(UpperCamelCase_ ) ) )
def lowercase__ ( self : str )->Optional[int]:
_UpperCAmelCase = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
_UpperCAmelCase = get_activation('''gelu''' )
_UpperCAmelCase = get_activation('''gelu_10''' )
_UpperCAmelCase = torch_builtin(UpperCamelCase_ )
_UpperCAmelCase = geluaa(UpperCamelCase_ )
_UpperCAmelCase = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 )
self.assertTrue(torch.max(UpperCamelCase_ ).item() == 1_0.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowercase__ ( self : str )->Any:
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
with self.assertRaises(UpperCamelCase_ ):
get_activation('''bogus''' )
with self.assertRaises(UpperCamelCase_ ):
get_activation(UpperCamelCase_ )
def lowercase__ ( self : List[Any] )->Union[str, Any]:
_UpperCAmelCase = get_activation('''gelu''' )
_UpperCAmelCase = 1
_UpperCAmelCase = get_activation('''gelu''' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(UpperCamelCase_ ):
_UpperCAmelCase = acta.a
| 714
|
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
_UpperCAmelCase = filter(lambda _SCREAMING_SNAKE_CASE : p.requires_grad , model.parameters() )
_UpperCAmelCase = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__A : Dict = logging.getLogger(__name__)
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
if metric == "rouge2":
_UpperCAmelCase = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
_UpperCAmelCase = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
_UpperCAmelCase = '''{val_avg_em:.4f}-{step_count}'''
else:
raise NotImplementedError(
            f'seq2seq callbacks only support rouge2 and bleu, got {metric}. You can make your own by adding to this'
''' function.''' )
_UpperCAmelCase = ModelCheckpoint(
dirpath=_SCREAMING_SNAKE_CASE , filename=_SCREAMING_SNAKE_CASE , monitor=f'val_{metric}' , mode='''max''' , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
return EarlyStopping(
monitor=f'val_{metric}' , mode='''min''' if '''loss''' in metric else '''max''' , patience=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , )
class _a ( pl.Callback):
"""simple docstring"""
def lowercase__ ( self : List[str] , __UpperCamelCase : str , __UpperCamelCase : str )->Tuple:
_UpperCAmelCase = {F'lr_group_{i}': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(__UpperCamelCase )
@rank_zero_only
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : pl.Trainer , __UpperCamelCase : pl.LightningModule , __UpperCamelCase : str , __UpperCamelCase : Tuple=True )->None:
logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' )
_UpperCAmelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
_UpperCAmelCase = Path(pl_module.hparams.output_dir )
if type_path == "test":
_UpperCAmelCase = od / '''test_results.txt'''
_UpperCAmelCase = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_UpperCAmelCase = od / F'{type_path}_results/{trainer.global_step:05d}.txt'
_UpperCAmelCase = od / F'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=__UpperCamelCase )
generations_file.parent.mkdir(exist_ok=__UpperCamelCase )
with open(__UpperCamelCase , '''a+''' ) as writer:
for key in sorted(__UpperCamelCase ):
if key in ["log", "progress_bar", "preds"]:
continue
_UpperCAmelCase = metrics[key]
if isinstance(__UpperCamelCase , torch.Tensor ):
_UpperCAmelCase = val.item()
_UpperCAmelCase = F'{key}: {val:.6f}\n'
writer.write(__UpperCamelCase )
if not save_generations:
return
if "preds" in metrics:
_UpperCAmelCase = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(__UpperCamelCase )
@rank_zero_only
def lowercase__ ( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : Dict )->Union[str, Any]:
try:
_UpperCAmelCase = pl_module.model.model.num_parameters()
except AttributeError:
_UpperCAmelCase = pl_module.model.num_parameters()
_UpperCAmelCase = count_trainable_parameters(__UpperCamelCase )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} )
@rank_zero_only
def lowercase__ ( self : str , __UpperCamelCase : pl.Trainer , __UpperCamelCase : pl.LightningModule )->List[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(__UpperCamelCase , __UpperCamelCase , '''test''' )
@rank_zero_only
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : pl.Trainer , __UpperCamelCase : List[str] )->Optional[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 95
| 0
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE_ ( _a ):
"""simple docstring"""
def __init__( self :Union[str, Any], snake_case :AutoencoderKL, snake_case :CLIPTextModel, snake_case :CLIPTokenizer, snake_case :UNetaDConditionModel, snake_case :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], snake_case :StableDiffusionSafetyChecker, snake_case :CLIPImageProcessor, ):
"""simple docstring"""
super().__init__()
self.register_modules(
vae=snake_case, text_encoder=snake_case, tokenizer=snake_case, unet=snake_case, scheduler=snake_case, safety_checker=snake_case, feature_extractor=snake_case, )
def UpperCamelCase__ ( self :str, snake_case :Optional[Union[str, int]] = "auto"):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowercase =self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case)
def UpperCamelCase__ ( self :Tuple):
"""simple docstring"""
self.enable_attention_slicing(snake_case)
@torch.no_grad()
def __call__( self :Optional[int], snake_case :Union[str, List[str]], snake_case :int = 512, snake_case :int = 512, snake_case :int = 50, snake_case :float = 7.5, snake_case :Optional[Union[str, List[str]]] = None, snake_case :Optional[int] = 1, snake_case :float = 0.0, snake_case :Optional[torch.Generator] = None, snake_case :Optional[torch.FloatTensor] = None, snake_case :Optional[str] = "pil", snake_case :bool = True, snake_case :Optional[Callable[[int, int, torch.FloatTensor], None]] = None, snake_case :int = 1, snake_case :Optional[torch.FloatTensor] = None, **snake_case :int, ):
"""simple docstring"""
if isinstance(snake_case, snake_case):
_lowercase =1
elif isinstance(snake_case, snake_case):
_lowercase =len(snake_case)
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(snake_case)}''')
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''')
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(snake_case, snake_case) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(snake_case)}.''')
# get prompt text embeddings
_lowercase =self.tokenizer(
snake_case, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt', )
_lowercase =text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_lowercase =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''')
_lowercase =text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
_lowercase =self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
_lowercase , _lowercase , _lowercase =text_embeddings.shape
_lowercase =text_embeddings.repeat(1, snake_case, 1)
_lowercase =text_embeddings.view(bs_embed * num_images_per_prompt, snake_case, -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowercase =guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowercase =42
if negative_prompt is None:
_lowercase =['']
elif type(snake_case) is not type(snake_case):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(snake_case)} !='''
f''' {type(snake_case)}.''')
elif isinstance(snake_case, snake_case):
_lowercase =[negative_prompt]
elif batch_size != len(snake_case):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(snake_case)}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
' the batch size of `prompt`.')
else:
_lowercase =negative_prompt
_lowercase =text_input_ids.shape[-1]
_lowercase =self.tokenizer(
snake_case, padding='max_length', max_length=snake_case, truncation=snake_case, return_tensors='pt', )
_lowercase =self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_lowercase =uncond_embeddings.shape[1]
_lowercase =uncond_embeddings.repeat(snake_case, snake_case, 1)
_lowercase =uncond_embeddings.view(batch_size * num_images_per_prompt, snake_case, -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowercase =torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowercase =(batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
_lowercase =(batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
_lowercase =text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
_lowercase =torch.randn(
snake_case, generator=snake_case, device='cpu', dtype=snake_case).to(self.device)
_lowercase =torch.randn(snake_case, generator=snake_case, device='cpu', dtype=snake_case).to(
self.device)
else:
_lowercase =torch.randn(
snake_case, generator=snake_case, device=self.device, dtype=snake_case)
_lowercase =torch.randn(snake_case, generator=snake_case, device=self.device, dtype=snake_case)
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''')
_lowercase =latents_reference.to(self.device)
_lowercase =latents.to(self.device)
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
_lowercase =(latents_shape[3] - latents_shape_reference[3]) // 2
_lowercase =(latents_shape[2] - latents_shape_reference[2]) // 2
_lowercase =latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
_lowercase =latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
_lowercase =0 if dx < 0 else dx
_lowercase =0 if dy < 0 else dy
_lowercase =max(-dx, 0)
_lowercase =max(-dy, 0)
_lowercase =latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(snake_case)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
_lowercase =self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
_lowercase =latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowercase ='eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
_lowercase ={}
if accepts_eta:
_lowercase =eta
for i, t in enumerate(self.progress_bar(snake_case)):
# expand the latents if we are doing classifier free guidance
_lowercase =torch.cat([latents] * 2) if do_classifier_free_guidance else latents
_lowercase =self.scheduler.scale_model_input(snake_case, snake_case)
# predict the noise residual
_lowercase =self.unet(snake_case, snake_case, encoder_hidden_states=snake_case).sample
# perform guidance
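            # classifier-free guidance combines the two passes:
            #   noise = noise_uncond + guidance_scale * (noise_text - noise_uncond)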
if do_classifier_free_guidance:
_lowercase , _lowercase =noise_pred.chunk(2)
_lowercase =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
_lowercase =self.scheduler.step(snake_case, snake_case, snake_case, **snake_case).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case, snake_case, snake_case)
_lowercase =1 / 0.1_8_2_1_5 * latents
_lowercase =self.vae.decode(snake_case).sample
_lowercase =(image / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_lowercase =image.cpu().permute(0, 2, 3, 1).float().numpy()
if self.safety_checker is not None:
_lowercase =self.feature_extractor(self.numpy_to_pil(snake_case), return_tensors='pt').to(
self.device)
_lowercase , _lowercase =self.safety_checker(
images=snake_case, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype))
else:
_lowercase =None
if output_type == "pil":
_lowercase =self.numpy_to_pil(snake_case)
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=snake_case, nsfw_content_detected=snake_case)
| 181
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE_ ( _a ):
"""simple docstring"""
__lowerCAmelCase : str ='''mra'''
def __init__( self :Any, snake_case :List[str]=5_0265, snake_case :List[Any]=768, snake_case :Optional[Any]=12, snake_case :Optional[Any]=12, snake_case :str=3072, snake_case :Tuple="gelu", snake_case :Optional[int]=0.1, snake_case :int=0.1, snake_case :Any=512, snake_case :Union[str, Any]=1, snake_case :Union[str, Any]=0.0_2, snake_case :List[Any]=1e-5, snake_case :Optional[int]="absolute", snake_case :Optional[int]=4, snake_case :str="full", snake_case :Optional[int]=0, snake_case :List[Any]=0, snake_case :int=1, snake_case :List[Any]=0, snake_case :Dict=2, **snake_case :Dict, ):
"""simple docstring"""
super().__init__(pad_token_id=snake_case, bos_token_id=snake_case, eos_token_id=snake_case, **snake_case)
_lowercase =vocab_size
_lowercase =max_position_embeddings
_lowercase =hidden_size
_lowercase =num_hidden_layers
_lowercase =num_attention_heads
_lowercase =intermediate_size
_lowercase =hidden_act
_lowercase =hidden_dropout_prob
_lowercase =attention_probs_dropout_prob
_lowercase =initializer_range
_lowercase =type_vocab_size
_lowercase =layer_norm_eps
_lowercase =position_embedding_type
_lowercase =block_per_row
_lowercase =approx_mode
_lowercase =initial_prior_first_n_blocks
_lowercase =initial_prior_diagonal_n_blocks
| 181
| 1
|
'''simple docstring'''
import argparse
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''docs/source/_static/js/custom.js'''
def a ( UpperCamelCase_ : Any ) -> Tuple:
with open(__snake_case , encoding='utf-8' , newline='\n' ) as f:
snake_case__ =f.readlines()
snake_case__ =0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
snake_case__ =f"""const stableVersion = \"v{version}\"\n"""
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += f""" \"v{version}\": \"v{version}\",\n"""
with open(__snake_case , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(__snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser()
parser.add_argument('''--version''', help='''Release version.''')
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
update_custom_js(args.version)
| 707
|
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
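# Directed graph stored as adjacency lists: self.graph maps each vertex u
# to a list of [weight, v] edges. The second class further below is the
# undirected variant, which mirrors every edge in both directions.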
class a__:
def __init__( self ) -> List[Any]:
snake_case__ ={}
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=1 ) -> Tuple:
if self.graph.get(_UpperCAmelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
snake_case__ =[[w, v]]
if not self.graph.get(_UpperCAmelCase ):
snake_case__ =[]
def _lowercase ( self ) -> Optional[int]:
return list(self.graph )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
if self.graph.get(_UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_UpperCAmelCase )
def _lowercase ( self , _UpperCAmelCase=-2 , _UpperCAmelCase=-1 ) -> int:
if s == d:
return []
snake_case__ =[]
snake_case__ =[]
if s == -2:
snake_case__ =list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
snake_case__ =s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case__ =s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case__ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_UpperCAmelCase ) != 0:
snake_case__ =stack[len(_UpperCAmelCase ) - 1]
else:
snake_case__ =ss
            # check if we have reached the starting point
if len(_UpperCAmelCase ) == 0:
return visited
def _lowercase ( self , _UpperCAmelCase=-1 ) -> Optional[int]:
if c == -1:
snake_case__ =floor(random() * 1_0000 ) + 10
for i in range(_UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case__ =floor(random() * c ) + 1
if n != i:
self.add_pair(_UpperCAmelCase , _UpperCAmelCase , 1 )
def _lowercase ( self , _UpperCAmelCase=-2 ) -> Optional[Any]:
snake_case__ =deque()
snake_case__ =[]
if s == -2:
snake_case__ =list(self.graph )[0]
d.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
while d:
snake_case__ =d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _lowercase ( self , _UpperCAmelCase ) -> List[str]:
snake_case__ =0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _lowercase ( self , _UpperCAmelCase ) -> Optional[int]:
return len(self.graph[u] )
    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes
    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)
    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False
    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the edge the other way as well
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has at most 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])
    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)
    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False
    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
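# A minimal usage sketch for the undirected graph (a sketch, not part of the original file):
#
#   g = Graph()
#   g.add_pair(1, 2)
#   g.add_pair(2, 3)
#   print(g.all_nodes())   # [1, 2, 3]
#   print(g.degree(2))     # 2, since 2 is connected to both 1 and 3
#   print(g.bfs(1))        # [1, 2, 3]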
| 581
| 0
|
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
_lowercase = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
print("""Googling.....""")
_lowercase = f'''https://www.google.com/search?q={query}&num=100'''
_lowercase = requests.get(
url,
headers={"""User-Agent""": str(UserAgent().random)},
)
try:
_lowercase = (
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """yuRUbf"""})
.find("""a""")
.get("""href""")
)
except AttributeError:
_lowercase = parse_qs(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """kCrYT"""})
.find("""a""")
.get("""href""")
)["""url"""][0]
webbrowser.open(link)
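# Usage sketch (requires the requests, bs4 and fake_useragent packages; the script
# name is an assumption):
#   python google_search.py "open source search"
# With no CLI arguments the script prompts for a query, then opens the first result
# in the default browser.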
| 356
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    """configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_pegasus_x"""] = [
        """PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """PegasusXForConditionalGeneration""",
        """PegasusXModel""",
        """PegasusXPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 356
| 1
|
def __magic_name__ ( number ) -> bool:
    '''simple docstring'''
    if not isinstance(number , int ):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
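# A few worked examples: a number passes when its square ends in the number itself.
# 5 * 5 = 25, 6 * 6 = 36 and 76 * 76 = 5776 all do; 7 * 7 = 49 does not.
assert __magic_name__(5) and __magic_name__(6) and __magic_name__(76)
assert not __magic_name__(7)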
if __name__ == "__main__":
import doctest
doctest.testmod()
| 414
|
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class __UpperCAmelCase ( Pipeline ):
    """simple docstring"""

    def __init__( self , **kwargs ) -> str:
        """simple docstring"""
        super().__init__(**kwargs )
        if self.framework != "pt":
            raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
        # No specific FOR_XXX available yet

    def __call__( self , audios , **kwargs ) -> Optional[Any]:
        """simple docstring"""
        return super().__call__(audios , **kwargs )

    def _sanitize_parameters( self , **kwargs ) -> Dict:
        """simple docstring"""
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess( self , audio , candidate_labels=None , hypothesis_template="This is a sound of {}." ) -> Optional[int]:
        """simple docstring"""
        if isinstance(audio , str ):
            if audio.startswith("http://" ) or audio.startswith("https://" ):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio ).content
            else:
                with open(audio , "rb" ) as f:
                    audio = f.read()
        if isinstance(audio , bytes ):
            audio = ffmpeg_read(audio , self.feature_extractor.sampling_rate )
        if not isinstance(audio , np.ndarray ):
            raise ValueError("We expect a numpy ndarray as input" )
        if len(audio.shape ) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline" )
        inputs = self.feature_extractor(
            [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="pt" )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward( self , model_inputs ) -> Optional[int]:
        """simple docstring"""
        candidate_labels = model_inputs.pop("candidate_labels" )
        text_inputs = model_inputs.pop("text_inputs" )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess( self , model_outputs ) -> Any:
        """simple docstring"""
        candidate_labels = model_outputs.pop("candidate_labels" )
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0 )
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported." )
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
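# Minimal usage sketch (the checkpoint name is an assumption; any CLAP-style
# zero-shot audio model should work):
#
#   from transformers import pipeline
#   classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier("audio.wav", candidate_labels=["dog barking", "vacuum cleaner"])
#
# The call returns a list of {"score", "label"} dicts sorted by descending score,
# exactly as assembled in postprocess above.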
| 414
| 1
|
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        "repo_id": str(repo ),
        "repo_sha": str(repo.head.object.hexsha ),
        "repo_branch": str(repo.active_branch ),
    }
    with open(os.path.join(folder_path , "git_log.json" ) , "w" ) as f:
        json.dump(repo_infos , f , indent=4 )
def init_gpu_params(params):
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
    assert torch.cuda.is_available()
    logger.info("Initializing GPUs" )
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ["WORLD_SIZE"] )
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"] )
        params.global_rank = int(os.environ["RANK"] )
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ["N_NODES"] )
        assert params.node_id == int(os.environ["NODE_RANK"] )
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = f'''--- Global rank: {params.global_rank} - '''
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
    logger.info(PREFIX + "Node ID        : %i" % params.node_id )
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank )
    logger.info(PREFIX + "World size     : %i" % params.world_size )
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node )
    logger.info(PREFIX + "Master         : %s" % str(params.is_master ) )
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node ) )
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu ) )
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname() )
    # set GPU device
    torch.cuda.set_device(params.local_rank )
    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed" )
        torch.distributed.init_process_group(
            init_method="env://" , backend="nccl" , )
def set_seed(args):
    np.random.seed(args.seed )
    torch.manual_seed(args.seed )
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed )
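# Quick usage sketch for the helpers above (a sketch, not part of the original training code):
#
#   from argparse import Namespace
#   args = Namespace(seed=56, n_gpu=0)
#   init_gpu_params(args)   # CPU-only path: marks the process as single-node master
#   set_seed(args)          # seeds the python, numpy and torch RNGs from args.seed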
| 489
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__(self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=2_0 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
'''simple docstring'''
__UpperCAmelCase =parent
__UpperCAmelCase =batch_size
__UpperCAmelCase =seq_length
__UpperCAmelCase =is_training
__UpperCAmelCase =use_labels
__UpperCAmelCase =vocab_size
__UpperCAmelCase =hidden_size
__UpperCAmelCase =num_hidden_layers
__UpperCAmelCase =num_attention_heads
__UpperCAmelCase =intermediate_size
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =max_position_embeddings
__UpperCAmelCase =eos_token_id
__UpperCAmelCase =pad_token_id
__UpperCAmelCase =bos_token_id
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self , config , inputs_dict):
        '''simple docstring'''
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        head_mask = inputs_dict['''head_mask''']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.int8)
        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1)
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1e-3)
def prepare_blenderbot_small_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> List[str]:
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _SCREAMING_SNAKE_CASE ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': TFBlenderbotSmallForConditionalGeneration,
            '''feature-extraction''': TFBlenderbotSmallModel,
            '''summarization''': TFBlenderbotSmallForConditionalGeneration,
            '''text2text-generation''': TFBlenderbotSmallForConditionalGeneration,
            '''translation''': TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def A__ (self):
        '''simple docstring'''
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self , config_class=BlenderbotSmallConfig)
def A__ (self):
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ (self):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    src_text = [
        '''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '''
        ''' i\'m going to throw up.\nand why is that?'''
    ]
    model_name = '''facebook/blenderbot_small-90M'''
@cached_property
    def tokenizer(self):
        '''simple docstring'''
        return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''')
@cached_property
    def model(self):
        '''simple docstring'''
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
@slow
def A__ (self):
'''simple docstring'''
        model_inputs = self.tokenizer(self.src_text , return_tensors='''tf''')
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True)[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 132
| 0
|
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial( __A : int ):
    '''simple docstring'''
    return sum(DIGIT_FACTORIAL[d] for d in str(__A ) )


def solution( ):
    '''simple docstring'''
    limit = 7 * factorial(9 ) + 1
    return sum(i for i in range(3 , limit ) if sum_of_digit_factorial(i ) == i )
if __name__ == "__main__":
print(F'{solution() = }')
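# Worked example: 145 qualifies, since 1! + 4! + 5! = 1 + 24 + 120 = 145.
# The 7 * 9! + 1 bound works because an 8-digit number can sum to at most
# 8 * 9! = 2,903,040, which has only 7 digits, so larger numbers can never qualify.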
| 692
|
'''simple docstring'''
def solution( n : int = 1_00 ):
    '''simple docstring'''
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'{solution() = }')
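# Worked example for n = 10: the sum of the squares is 385, the square of the sum
# is 55 ** 2 = 3025, and the difference is 3025 - 385 = 2640.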
| 692
| 1
|
from __future__ import annotations
def slowsort(sequence , start = None , end = None) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence , start , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
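# Quick usage check (slowsort sorts the list in place):
#   data = [5, 2, 9, 1]
#   slowsort(data)
#   assert data == [1, 2, 5, 9]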
| 33
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)
class __magic_name__ (PerceiverImageProcessor ):
'''simple docstring'''
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            '''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use PerceiverImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 33
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    """simple docstring"""

    def __init__( self , parent , batch_size=3 , image_size=3_2 , num_channels=3 , embeddings_size=1_0 , hidden_sizes=[8, 1_6, 3_2, 6_4] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , num_groups=1 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs( self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self):
        return BitConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
    def create_and_check_model( self , config , pixel_values , labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )

    def create_and_check_for_image_classification( self , config , pixel_values , labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))

    def create_and_check_backbone( self , config , pixel_values , labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels) , len(config.out_features))
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels) , 1)
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common( self):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp( self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self , config_class=BitConfig , has_text_modality=False)
def snake_case_ ( self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case_ ( self):
return
@unittest.skip(reason="""Bit does not output attentions""")
def snake_case_ ( self):
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""")
def snake_case_ ( self):
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""")
def snake_case_ ( self):
pass
def snake_case_ ( self):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names)

    def snake_case_ ( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def snake_case_ ( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def snake_case_ ( self):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
                    self.assertTrue(
                        torch.all(module.bias == 0) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def snake_case_ ( self):
        def check_hidden_states_output(inputs_dict , config , model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states) , expected_num_stages + 1)
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["""preactivation""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["""output_hidden_states"""] = True
                check_hidden_states_output(inputs_dict , config , model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class)
@unittest.skip(reason="""Bit does not use feedforward chunking""")
def snake_case_ ( self):
pass
    def snake_case_ ( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def snake_case_ ( self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def _lowerCAmelCase ( ):
__SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )
@slow
def snake_case_ ( self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4))
@require_torch
class SCREAMING_SNAKE_CASE_ ( BackboneTesterMixin , unittest.TestCase ):
    """simple docstring"""

    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False

    def setUp( self):
        self.model_tester = BitModelTester(self)
| 248
|
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level():
    env_level_str = os.getenv("""TRANSFORMERS_VERBOSITY""" , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys() ) }" )
    return _default_log_level
def _get_library_name():
    return __name__.split(""".""" )[0]


def _get_library_root_logger():
    return logging.getLogger(_get_library_name() )


def _configure_library_root_logger():
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        library_root_logger.propagate = False


def _reset_library_root_logger():
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        _default_handler = None
def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None ):
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name )


def get_verbosity():
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int ):
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity )


def set_verbosity_info():
    return set_verbosity(INFO )


def set_verbosity_warning():
    return set_verbosity(WARNING )


def set_verbosity_debug():
    return set_verbosity(DEBUG )


def set_verbosity_error():
    return set_verbosity(ERROR )
def disable_default_handler():
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler )


def enable_default_handler():
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler )


def add_handler(handler: logging.Handler ):
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler )


def remove_handler(handler: logging.Handler ):
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler )


def disable_propagation():
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation():
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format():
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("""[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s""" )
        handler.setFormatter(formatter )


def reset_format():
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None )


def warning_advice(self , *args , **kwargs ):
    no_advisory_warnings = os.getenv("""TRANSFORMERS_NO_ADVISORY_WARNINGS""" , False )
    if no_advisory_warnings:
        return
    self.warning(*args , **kwargs )


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None )
def warning_once(self , *args , **kwargs ):
    self.warning(*args , **kwargs )


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__( self , *args , **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__( self):
        return iter(self._iterator)

    def __getattr__( self , _):
        """Return an empty function."""

        def empty_fn(*args , **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__( self):
        return self

    def __exit__( self , type_ , value , traceback):
        return


class _tqdm_cls:
    def __call__( self , *args , **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args , **kwargs)
        else:
            return EmptyTqdm(*args , **kwargs)

    def set_lock( self , *args , **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs)

    def get_lock( self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()


def is_progress_bar_enabled():
    global _tqdm_active
    return bool(_tqdm_active )


def enable_progress_bars():
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars():
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
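# Minimal usage sketch (names as restored above; mirrors the upstream transformers API):
#
#   logger = get_logger(__name__)
#   set_verbosity_info()
#   logger.info("visible at INFO level")
#   logger.warning_advice("suppressed when TRANSFORMERS_NO_ADVISORY_WARNINGS is set")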
| 248
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class snake_case ( metaclass=DummyObject ):
lowerCamelCase__ = ['''speech''']
def __init__( self :List[Any] , *_lowerCamelCase :str , **_lowerCamelCase :Optional[int] ):
requires_backends(self , ['''speech'''] )
class snake_case ( metaclass=DummyObject ):
lowerCamelCase__ = ['''speech''']
def __init__( self :List[str] , *_lowerCamelCase :Optional[int] , **_lowerCamelCase :Dict ):
requires_backends(self , ['''speech'''] )
| 674
|
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _snake_case ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
        controlnet , controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-canny' , from_pt=True , dtype=jnp.bfloat16 )
        pipe , params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params['controlnet'] = controlnet_params
lowerCAmelCase = 'bird'
lowerCAmelCase = jax.device_count()
lowerCAmelCase = pipe.prepare_text_inputs([prompts] * num_samples )
lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
lowerCAmelCase = pipe.prepare_image_inputs([canny_image] * num_samples )
lowerCAmelCase = jax.random.PRNGKey(0 )
lowerCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , jax.device_count() )
lowerCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = shard(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = shard(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = pipe(
prompt_ids=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , params=_SCREAMING_SNAKE_CASE , prng_seed=_SCREAMING_SNAKE_CASE , num_inference_steps=50 , jit=_SCREAMING_SNAKE_CASE , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
lowerCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCAmelCase = images[0, 2_53:2_56, 2_53:2_56, -1]
lowerCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCAmelCase = jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
        controlnet , controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-openpose' , from_pt=True , dtype=jnp.bfloat16 )
        pipe , params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params['controlnet'] = controlnet_params
lowerCAmelCase = 'Chef in the kitchen'
lowerCAmelCase = jax.device_count()
lowerCAmelCase = pipe.prepare_text_inputs([prompts] * num_samples )
lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
lowerCAmelCase = pipe.prepare_image_inputs([pose_image] * num_samples )
lowerCAmelCase = jax.random.PRNGKey(0 )
lowerCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , jax.device_count() )
lowerCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = shard(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = shard(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = pipe(
prompt_ids=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , params=_SCREAMING_SNAKE_CASE , prng_seed=_SCREAMING_SNAKE_CASE , num_inference_steps=50 , jit=_SCREAMING_SNAKE_CASE , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
lowerCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCAmelCase = images[0, 2_53:2_56, 2_53:2_56, -1]
lowerCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCAmelCase = jnp.array(
[[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 284
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_informer': [
        'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'InformerConfig',
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_informer'] = [
        'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'InformerForPrediction',
        'InformerModel',
        'InformerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 702
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'yjernite/retribert-base-uncased': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class a_ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 84
| 0
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/pix2struct-textcaps-base''': (
        '''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
    ),
}
class PixaStructTextConfig( PretrainedConfig ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = "pix2struct_text_model"
lowerCAmelCase__ : Tuple = ["past_key_values"]
lowerCAmelCase__ : Any = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
    def __init__( self , vocab_size=5_0244 , hidden_size=768 , d_kv=64 , d_ff=2048 , num_layers=12 , num_heads=12 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , initializer_factor=1.0 , dense_act_fn="gelu_new" , decoder_start_token_id=0 , use_cache=False , pad_token_id=0 , eos_token_id=1 , tie_word_embeddings=False , is_decoder=True , **kwargs , ) -> Union[str, Any]:
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , tie_word_embeddings=tie_word_embeddings , is_decoder=is_decoder , **kwargs , )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type' ) == "pix2struct":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class PixaStructVisionConfig( PretrainedConfig ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = "pix2struct_vision_model"
    def __init__( self , hidden_size=768 , patch_embed_hidden_size=768 , d_ff=2048 , d_kv=64 , num_hidden_layers=12 , num_attention_heads=12 , dense_act_fn="gelu_new" , layer_norm_eps=1e-6 , dropout_rate=0.0 , attention_dropout=0.0 , initializer_range=1e-10 , initializer_factor=1.0 , seq_len=4096 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , **kwargs , ) -> Optional[int]:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type' ) == "pix2struct":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase__ : Dict = "pix2struct"
lowerCAmelCase__ : Dict = True
def __init__( self : Union[str, Any] ,UpperCamelCase : List[Any]=None ,UpperCamelCase : str=None ,UpperCamelCase : List[str]=1.0 ,UpperCamelCase : List[Any]=0.0_2 ,UpperCamelCase : Optional[int]=False ,UpperCamelCase : Tuple=False ,UpperCamelCase : Optional[int]=True ,**UpperCamelCase : Union[str, Any] ,) -> Optional[Any]:
super().__init__(tie_word_embeddings=UpperCamelCase ,is_encoder_decoder=UpperCamelCase ,**UpperCamelCase )
if text_config is None:
_lowercase : int = {}
logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
if vision_config is None:
_lowercase : Union[str, Any] = {}
logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
_lowercase : Dict = PixaStructTextConfig(**UpperCamelCase )
_lowercase : Optional[int] = PixaStructVisionConfig(**UpperCamelCase )
_lowercase : List[str] = self.text_config.decoder_start_token_id
_lowercase : Optional[int] = self.text_config.pad_token_id
_lowercase : str = self.text_config.eos_token_id
_lowercase : List[str] = initializer_factor
_lowercase : Union[str, Any] = initializer_range
_lowercase : List[str] = self.initializer_range
_lowercase : Optional[Any] = self.initializer_range
_lowercase : int = is_vqa
@classmethod
def _lowerCamelCase ( cls : Tuple ,UpperCamelCase : PixaStructTextConfig ,UpperCamelCase : PixaStructVisionConfig ,**UpperCamelCase : int ) -> Optional[int]:
return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**UpperCamelCase )
def _lowerCamelCase ( self : int ) -> List[str]:
_lowercase : Dict = copy.deepcopy(self.__dict__ )
_lowercase : Union[str, Any] = self.text_config.to_dict()
_lowercase : List[str] = self.vision_config.to_dict()
_lowercase : List[str] = self.__class__.model_type
return output
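
# Editor's example (not part of the original module): a minimal sketch, assuming the three
# Pix2Struct config classes above, showing how the composite config is built from explicit
# sub-configs and how it mirrors the text config's special token ids.
def _example_pix2struct_config():
    text_config = Pix2StructTextConfig(hidden_size=768, num_layers=12)
    vision_config = Pix2StructVisionConfig(hidden_size=768, num_hidden_layers=12)
    config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config, is_vqa=False)
    # The composite config copies the decoder start token from the text config.
    assert config.decoder_start_token_id == text_config.decoder_start_token_id
    return config.to_dict()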
| 125
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # Note: the original checks divided a value by itself (always 0); the intended check is
        # divisibility by the head width, which the error messages and the checks below imply.
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
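
# Editor's example (not part of the original module): a minimal sketch showing that a
# folding-model config falls back to the default EsmFoldConfig and to
# `get_default_vocab_list()` when neither is supplied explicitly.
def _example_esmfold_config():
    config = EsmConfig(vocab_size=33, is_folding_model=True)
    assert isinstance(config.esmfold_config, EsmFoldConfig)
    assert config.vocab_list == get_default_vocab_list()
    return config.to_dict()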
| 125
| 1
|
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
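
# Editor's example (not part of the original script): `normalize_text` lower-cases the input,
# strips the ignored punctuation, then collapses the whitespace sequences listed above.
def _example_normalize_text():
    return normalize_text("Hello,   WORLD!\n")  # == "hello world "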
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
lowerCAmelCase__ = parser.parse_args()
main(args)
| 705
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
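
# Editor's example (not part of the original file): the parser also works standalone, without
# being attached to the accelerate CLI's subparsers.
def _example_parse_config_args():
    parser = config_command_parser()
    args = parser.parse_args(["--config_file", "my_config.yaml"])
    return args.config_file  # == "my_config.yaml"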
if __name__ == "__main__":
main()
| 327
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
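
# Editor's example (not part of the original file): CamemBERT formats a single sequence as
# `<s> A </s>` and a pair as `<s> A </s></s> B </s>`, which the two methods above implement.
def _example_camembert_special_tokens(tokenizer: CamembertTokenizerFast):
    single = tokenizer.build_inputs_with_special_tokens([10, 11])
    pair = tokenizer.build_inputs_with_special_tokens([10, 11], [12])
    return single, pair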
| 535
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]

    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test


def reduce_by_model(logs, error_filter=None):
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
lowerCamelCase_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
lowerCamelCase_ : List[str] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowerCamelCase_ : Any = get_job_links(args.workflow_run_id, token=args.token)
lowerCamelCase_ : Any = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowerCamelCase_ : int = k.find(""" / """)
lowerCamelCase_ : str = k[index + len(""" / """) :]
lowerCamelCase_ : Union[str, Any] = v
with open(os.path.join(args.output_dir, """job_links.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowerCamelCase_ : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowerCamelCase_ : Any = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowerCamelCase_ : Dict = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowerCamelCase_ : int = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, """errors.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowerCamelCase_ : Optional[int] = reduce_by_error(errors)
lowerCamelCase_ : Optional[int] = reduce_by_model(errors)
lowerCamelCase_ : Any = make_github_table(reduced_by_error)
lowerCamelCase_ : List[str] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, """reduced_by_error.txt"""), """w""", encoding="""UTF-8""") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, """reduced_by_model.txt"""), """w""", encoding="""UTF-8""") as fp:
fp.write(sa)
| 559
| 0
|
from __future__ import annotations
def p_series(nth_term: int, power: int) -> list[str]:
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
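
# Editor's sanity check: p_series(3, 2) -> ["1", "1 / 4", "1 / 9"], i.e. the first term is
# always rendered as "1" and later terms as "1 / (n^p)".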
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ = int(input('Enter the last number (nth term) of the P-Series'))
UpperCamelCase__ = int(input('Enter the power for P-Series'))
print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p')
print(p_series(nth_term, power))
| 713
|
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
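
# Editor's sanity check: odd_even_sort([5, 3, 1, 2]) -> [1, 2, 3, 5]; the sort mutates the
# list in place and also returns it.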
if __name__ == "__main__":
print('Enter list to be sorted')
UpperCamelCase__ = [int(x) for x in input().split()]
# inputing elements of the list in one line
UpperCamelCase__ = odd_even_sort(input_list)
print('The sorted list is')
print(sorted_list)
| 640
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
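
# Editor's note (not part of the original file): `_LazyModule` resolves names on first
# attribute access, so importing a symbol from this package only loads the submodule that
# actually defines it; the TYPE_CHECKING branch above exists so static type checkers still
# see the real imports.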
| 81
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
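
# Editor's example (not part of the original file): byte-level BPE first maps raw UTF-8 bytes
# to printable unicode characters via `bytes_to_unicode`, so even non-ASCII input becomes a
# string the merge table can operate on.
def _example_byte_encoding():
    byte_encoder = bytes_to_unicode()
    return "".join(byte_encoder[b] for b in "héllo".encode("utf-8"))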
| 127
| 0
|
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states


@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res


@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
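
# Editor's note (not part of the original file): with `patience > 0` at inference time, the
# model exits as soon as `patience` consecutive per-layer classifiers agree (exact match for
# classification, a threshold for regression), which is what the `patient_counter` bookkeeping
# in `BertModelWithPabee.forward` implements; the weighted loss above trains every internal
# classifier, weighting deeper exits more heavily.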
| 700
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 387
| 0
|
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def lowerCamelCase__ ( UpperCAmelCase_ )-> Tuple:
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> List[str]:
"""simple docstring"""
return max(metric_fn(UpperCAmelCase_ , UpperCAmelCase_ ) for gt in ground_truths )
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> List[Any]:
"""simple docstring"""
UpperCamelCase = [line.strip() for line in open(UpperCAmelCase_ , "r" ).readlines()]
UpperCamelCase = []
if args.gold_data_mode == "qa":
UpperCamelCase = pd.read_csv(UpperCAmelCase_ , sep="\t" , header=UpperCAmelCase_ )
for answer_list in data[1]:
UpperCamelCase = ast.literal_eval(UpperCAmelCase_ )
answers.append(UpperCAmelCase_ )
else:
UpperCamelCase = [line.strip() for line in open(UpperCAmelCase_ , "r" ).readlines()]
UpperCamelCase = [[reference] for reference in references]
UpperCamelCase = UpperCamelCase = UpperCamelCase = 0
for prediction, ground_truths in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
total += 1
em += metric_max_over_ground_truths(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
fa += metric_max_over_ground_truths(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
UpperCamelCase = 100.0 * em / total
UpperCamelCase = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def get_precision_at_k(args, preds_path, gold_data_path):
    """simple docstring"""
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]
    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k
    em = 100.0 * em / total
    logger.info(F"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    """simple docstring"""
    def strip_title(title):
        if title.startswith("\""):
            title = title[1:]
        if title.endswith("\""):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True, )["input_ids"].to(args.device)
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    """simple docstring"""
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True)
        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)
        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))
        return answers
def get_args():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ), )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type", )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index", )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ), )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples", )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples", )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file. "
            "qa - a single line in the following format: question [tab] answer_list. "
            "ans - a single line of the gold file contains the expected answer string"
        ), )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory", )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number", )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true", )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers", )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.", )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retrieved while generating.", )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    """simple docstring"""
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s", checkpoints)
    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue
        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))
        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)
        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()
            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
main(args)
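# Example invocation (script name and paths are hypothetical, shown only for
# illustration):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path predictions.txt \
#       --eval_mode e2e \
#       --gold_data_mode qa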
| 554
|
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.")
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext", help="Name of the dataset to use for training. Explore datasets at: hf.co/datasets.", )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset.")
    parser.add_argument(
        "--tokenizer_name_or_path", type=str, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", )
    parser.add_argument(
        "--shard_size", type=int, default=1000, help="Number of entries to go in a single shard.", )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit", default=None, type=int, help="Limit the number of shards (used for debugging).", )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.", )
    parser.add_argument(
        "--output_dir", default="tf-tpu", type=str, help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.", )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    """simple docstring"""
    def fn(examples):
        return tokenizer(examples["text"])
    return fn
def get_serialized_examples(tokenized_data):
    """simple docstring"""
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
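# Side note (illustrative sketch, not part of the original script): shards
# serialized this way can be read back with tf.data, e.g.
#
#   feature_description = {
#       "input_ids": tf.io.VarLenFeature(tf.int64),
#       "attention_mask": tf.io.VarLenFeature(tf.int64),
#   }
#   ds = tf.data.TFRecordDataset("dataset-0-1000.tfrecord")  # hypothetical filename
#   ds = ds.map(lambda r: tf.io.parse_single_example(r, feature_description))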
def main(args):
    """simple docstring"""
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)
    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(F"Limiting the dataset to {args.limit} entries.")
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)
    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, F"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)
        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))
        shard_count += 1
        total_records += records_containing
    with open(F"split-{args.split}-records-count.txt", "w") as f:
        print(F"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
main(args)
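# Example invocation (values are hypothetical, for illustration only):
#
#   python prepare_tfrecord_shards.py \
#       --dataset_name wikitext \
#       --dataset_config wikitext-103-raw-v1 \
#       --split train \
#       --shard_size 1000 \
#       --output_dir gs://my-bucket/tf-tpu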
| 554
| 1
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def get_checkpoint_from_config_class(config_class):
    """simple docstring"""
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = F"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def check_config_docstrings_have_checkpoints():
    """simple docstring"""
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(F"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
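# For reference, a docstring mention that satisfies the check above looks like
# (hypothetical example): [bert-base-uncased](https://huggingface.co/bert-base-uncased)
# i.e. the link target must be exactly https://huggingface.co/<checkpoint name>.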
| 336
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})

    def __post_init__(self):
        """simple docstring"""
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(self, args, tokenizer, limit_length=None, mode=Split.train, cache_dir=None):
        """simple docstring"""
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning, )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, F"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}", )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    F"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start)
            else:
                logger.info(F"Creating features from dataset file at {args.data_dir}")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples, tokenizer, max_length=args.max_seq_length, label_list=label_list, output_mode=self.output_mode, )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]")

    def __len__(self):
        """simple docstring"""
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        """simple docstring"""
        return self.features[i]

    def get_labels(self):
        """simple docstring"""
        return self.label_list
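# Hypothetical usage sketch (argument values assumed, not from this file):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   args = GlueDataTrainingArguments(task_name="mrpc", data_dir="path/to/MRPC")
#   train_dataset = GlueDataset(args, tokenizer=tokenizer, mode=Split.train)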
| 336
| 1
|
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCamelCase = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
lowerCamelCase = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
lowerCamelCase = r"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
'''simple docstring'''
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
    def _compute(self, predictions, references):
        '''simple docstring'''
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
return {
"accuracy": accuracy,
}
| 82
|
def binary_exponentiation(a, n, mod):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
# a prime number
p = 701
a = 1_000_000_000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
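# Why the modular-inverse trick works (sketch): by Fermat's little theorem, for a
# prime p and b not divisible by p, b**(p - 1) % p == 1, so b**(p - 2) % p is the
# modular inverse of b. Small worked example: with p = 5 and b = 3,
# 3**3 % 5 == 27 % 5 == 2, and (3 * 2) % 5 == 1, so 2 is the inverse of 3 mod 5.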
| 691
| 0
|
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    '''simple docstring'''
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
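# A graph containing an odd cycle is not bipartite; for example, a triangle
# (hypothetical input, not part of the original demo):
#   print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False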
| 665
|
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output), [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ], )
        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output), [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ], )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], )
        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output), [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ], )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output), [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ], )
        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output), [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5, )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf")
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output), [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ], )
        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output), [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5, )
| 665
| 1