| code (string, 82 – 53.2k chars) | code_codestyle (int64, 0 – 721) | style_context (string, 91 – 41.9k chars) | style_context_codestyle (int64, 0 – 699) | label (int64, 0 – 1) |
|---|---|---|---|---|
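Each row pairs a `code` snippet with a `style_context` snippet, plus integer style ids and a binary `label`. A minimal sketch for inspecting such a dataset with the 🤗 `datasets` library (the repo id below is a placeholder, assuming the data is hosted on the Hub):

    from datasets import load_dataset

    ds = load_dataset("user/code-style-pairs", split="train")  # hypothetical repo id
    print(ds.features)       # column names and dtypes, matching the header above
    print(ds[0]["label"])    # 0 or 1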
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional

import yaml

from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool


# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    # Route the mutable default through a factory so the dataclass machinery accepts it.
    return field(default_factory=lambda: default, metadata=metadata)
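# Note on the helper above: dataclasses refuse plain mutable defaults
# (`xs: List[int] = []` raises ValueError), so `list_field` routes the value
# through `default_factory`. A minimal illustration (hypothetical class, not
# part of the test suite):
#
#     @dataclass
#     class _Demo:
#         xs: List[int] = list_field(default=[1, 2])
#
#     _Demo().xs  # -> [1, 2]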
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """Small helper to check pseudo-equality of two ArgumentParser instances."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)
    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)
    def test_with_literal(self):
        @dataclass
        class WithLiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(WithLiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
    def test_list_field(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))
    def test_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))
    def test_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)
    def test_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)
    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)
    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
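# The pattern exercised by the tests above, in everyday form: declare a
# dataclass, hand it to HfArgumentParser, and get a typed instance back from
# the command line. A minimal sketch (hypothetical script, not part of the
# test file):
#
#     from dataclasses import dataclass, field
#     from transformers import HfArgumentParser
#
#     @dataclass
#     class ScriptArguments:
#         model_name: str = field(default="bert-base-uncased", metadata={"help": "checkpoint to load"})
#         num_epochs: int = 3
#
#     (script_args,) = HfArgumentParser(ScriptArguments).parse_args_into_dataclasses()
#     print(script_args.model_name, script_args.num_epochs)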
| 48
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
def _lowerCAmelCase(a : list[float] ) -> Any:
return np.maximum(0 , a )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
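# np.maximum broadcasts, so the same helper also works on whole arrays:
#
#     relu(np.array([[-2.0, 3.0], [0.5, -1.0]]))  # -> [[0.  3. ] [0.5 0. ]]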
| 255
| 0
|
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file=None, *args) -> None:
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = jax_version = jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info
    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 706
|
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
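# Example of the pytest summary line this parser expects (hypothetical input):
#
#     handle_test_results("= 4 failed, 102 passed in 214.32s =")
#     # -> (4, 102, "214.32s")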
def extract_first_line_failure(failures_short_lines):
    failures = {}
    failure = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            failure = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[failure] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self.thread_ts = None  # set by post(); post_reply() requires it

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }
    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
__lowerCAmelCase :str = get_job_links()
__lowerCAmelCase :Optional[int] = retrieve_available_artifacts()
__lowerCAmelCase :Dict = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__lowerCAmelCase :Any = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__lowerCAmelCase :str = github_actions_job_links.get('run_doctests')
__lowerCAmelCase :List[Any] = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
__lowerCAmelCase :Optional[Any] = retrieve_artifact(artifact_path['name'])
if "stats" in artifact:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase :Dict = handle_test_results(artifact['stats'])
__lowerCAmelCase :List[Any] = failed
__lowerCAmelCase :Optional[int] = success
__lowerCAmelCase :str = time_spent[1:-1] + ', '
__lowerCAmelCase :Optional[Any] = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
__lowerCAmelCase :Any = line.replace('FAILED ', '')
__lowerCAmelCase :int = line.split()[0].replace('\n', '')
if "::" in line:
__lowerCAmelCase , __lowerCAmelCase :List[str] = line.split('::')
else:
__lowerCAmelCase , __lowerCAmelCase :int = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__lowerCAmelCase :Any = docs[file_regex]
doc_test_results[category]["failed"].append(test)
__lowerCAmelCase :Union[str, Any] = all_failures[test] if test in all_failures else 'N/A'
__lowerCAmelCase :Optional[Any] = failure
break
__lowerCAmelCase :Optional[int] = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
| 278
| 0
|
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
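# A minimal usage sketch (assumes a running SparkSession; reader behavior as above):
#
#     from pyspark.sql import SparkSession
#
#     spark = SparkSession.builder.master("local[*]").getOrCreate()
#     df = spark.createDataFrame([("a", 1), ("b", 2)], ["text", "label"])
#     dataset = SparkDatasetReader(df, streaming=False).read()
#     print(dataset)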
| 48
|
'''MVP model configuration.'''
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267, max_position_embeddings=1024,
        encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16,
        decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0,
        activation_function="gelu", d_model=1024,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, classifier_dropout=0.0,
        scale_embedding=False, use_cache=True,
        pad_token_id=1, bos_token_id=0, eos_token_id=2,
        is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2,
        use_prompt=False, prompt_length=100, prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
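# A short usage sketch (standard PretrainedConfig workflow; values are illustrative):
#
#     config = MvpConfig(use_prompt=True, prompt_length=100)
#     config.save_pretrained("./mvp-config")               # writes config.json
#     config = MvpConfig.from_pretrained("./mvp-config")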
| 125
| 0
|
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape headline COVID-19 counters from worldometers and return them as a dict."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
| 705
|
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Check whether the pandigital number given as a digit tuple satisfies the
    sub-string divisibility property of Project Euler problem 43."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-to-9 pandigital numbers with the sub-string divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
| 69
| 0
|
'''Tests for the M2M100 tokenizer.'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
)
from transformers.utils import is_sentencepiece_available


if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase__ : Tuple = {'''input_ids''': [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase__,  # the encoding dict defined above
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
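# A short, hedged usage sketch of the tokenizer exercised above (documented
# M2M100 API; the checkpoint is the one used in these tests):
#
#     tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#     model_inputs = tokenizer("Hello world", return_tensors="pt")
#     # generation would pass forced_bos_token_id=tokenizer.get_lang_id("fr")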
| 75
|
'''Tests for the MBart tokenizer.'''
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
snake_case__ : List[str] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
snake_case__ : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(self.src_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=3 , return_tensors="""pt""" )
snake_case__ : Optional[int] = self.tokenizer(
text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=1_0 , return_tensors="""pt""" )
snake_case__ : str = targets["""input_ids"""]
snake_case__ : Optional[Any] = shift_tokens_right(__SCREAMING_SNAKE_CASE , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , {
# A, test, EOS, en_XX
"""input_ids""": [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 2_5_0_0_0_1,
} , )
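# Illustrative sketch (added; not the library implementation): for MBart, labels
# end with [..., eos, lang_code] and shift_tokens_right rotates the final
# language code around to position 0, so decoder inputs start with it. The
# first three token ids below are made up for illustration.
def _shift_tokens_right_sketch(label_ids, pad_token_id):
    non_pad = [t for t in label_ids if t != pad_token_id]
    return [non_pad[-1]] + non_pad[:-1] + [pad_token_id] * (len(label_ids) - len(non_pad))

assert _shift_tokens_right_sketch([100, 200, 2, RO_CODE, 1], pad_token_id=1) == [RO_CODE, 100, 200, 2, 1]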
| 38
| 0
|
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class SCREAMING_SNAKE_CASE__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = WavaVecaPhonemeCTCTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        super().setUp()
        vocab = (
"""<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """
"""ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """
"""ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """
"""oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """
"""pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """
"""yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """
"""əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """
"""ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """
"""ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """
"""uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """
"""ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """
"""ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """
"""ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"""
).split(""" """ )
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.special_tokens_map = {"""pad_token""": """<pad>""", """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ):
        toks = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=False )) for i in range(len(tokenizer ) )]
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , do_phonemize=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + """ """
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = """ """ + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def snake_case__ ( self : Tuple ):
__snake_case : str = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
# check adding a single token
tokenizer.add_tokens("""xxx""" )
__snake_case : str = tokenizer("""m xxx ɪ""" , do_phonemize=_a ).input_ids
self.assertEqual(_a , [13, 3_92, 17] ) # xxx should be last token
tokenizer.add_tokens(["""aaa""", """bbb""", """ccc"""] )
__snake_case : List[str] = tokenizer("""m aaa ɪ ccc""" , do_phonemize=_a ).input_ids
self.assertEqual(_a , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa
__snake_case : int = tokenizer("""maɪ c""" , do_phonemize=_a ).input_ids
self.assertEqual(_a , [3, 2_00] ) # mai should be <unk> (=3)
def snake_case__ ( self : List[Any] ):
__snake_case : str = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__snake_case : int = """Hello how are you"""
__snake_case : Tuple = tokenizer.phonemize(_a , phonemizer_lang="""en-us""" )
self.assertEqual(_a , """h ə l oʊ h aʊ ɑːɹ j uː""" )
def snake_case__ ( self : Optional[int] ):
__snake_case : int = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__snake_case : Any = """Hello how are you"""
__snake_case : List[str] = tokenizer.phonemize(_a , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(_a ).input_ids , tokenizer(_a , do_phonemize=_a ).input_ids )
def snake_case__ ( self : Any ):
__snake_case : str = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__snake_case : int = """Hello how are you"""
__snake_case : Optional[Any] = tokenizer.phonemize(_a , phonemizer_lang="""en-us""" )
__snake_case : Tuple = tokenizer.decode(tokenizer(_a ).input_ids )
self.assertEqual(_a , _a )
def snake_case__ ( self : str ):
__snake_case : Union[str, Any] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__snake_case : Dict = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
__snake_case : Union[str, Any] = tokenizer.decode(sample_ids[0] )
__snake_case : int = tokenizer.batch_decode(_a )
self.assertEqual(_a , batch_tokens[0] )
self.assertEqual(_a , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
def snake_case__ ( self : Optional[Any] ):
__snake_case : str = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__snake_case : Optional[Any] = """Hello how are you"""
__snake_case : Dict = tokenizer.phonemize(_a , phonemizer_lang="""en-us""" )
self.assertEqual(_a , """h ə l oʊ | h aʊ | ɑːɹ | j uː |""" )
def snake_case__ ( self : List[str] ):
__snake_case : List[Any] = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__snake_case : Any = """Hello how are you"""
__snake_case : List[str] = tokenizer.phonemize(_a , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(_a ).input_ids , tokenizer(_a , do_phonemize=_a ).input_ids )
def snake_case__ ( self : Tuple ):
__snake_case : int = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
__snake_case : List[Any] = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
__snake_case : Dict = tokenizer.decode(sample_ids[0] )
__snake_case : Optional[int] = tokenizer.batch_decode(_a )
self.assertEqual(_a , batch_tokens[0] )
self.assertEqual(_a , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
# decode with no word_del_token filter
__snake_case : Optional[int] = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=_a )
__snake_case : str = tokenizer.batch_decode(_a , filter_word_delimiter_token=_a )
self.assertEqual(_a , batch_tokens[0] )
self.assertEqual(_a , ["""k s ɾ | ɾ l | ɭʲ""", """| j ð | s j ð s oːɹ"""] )
def snake_case__ ( self : int ):
__snake_case : List[str] = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__snake_case : List[str] = """Hello how are you"""
__snake_case : Dict = tokenizer.phonemize(_a , phonemizer_lang="""en-us""" )
__snake_case : str = tokenizer.decode(tokenizer(_a ).input_ids , filter_word_delimiter_token=_a )
self.assertEqual(_a , _a )
def snake_case__ ( self : Dict ):
__snake_case : List[Any] = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
__snake_case : Dict = """Hello how are you"""
__snake_case : Dict = tokenizer.phonemize(_a , phonemizer_lang="""en-us""" )
__snake_case : List[str] = tokenizer.decode(tokenizer(_a ).input_ids , filter_word_delimiter_token=_a )
self.assertEqual(""" """.join([p.strip() for p in phonemes.split(""" |""" )] ).strip() , _a )
def snake_case__ ( self : Optional[Any] ):
__snake_case : str = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token=_a )
__snake_case : Dict = """Hello how are you"""
__snake_case : str = tokenizer(_a , phonemizer_lang="""en-us""" ).input_ids
__snake_case : List[str] = tokenizer(_a , phonemizer_lang="""fr-fr""" ).input_ids
self.assertNotEqual(_a , _a )
__snake_case : Union[str, Any] = tokenizer.decode(_a )
__snake_case : int = tokenizer.decode(_a )
self.assertEqual(_a , """h ə l oʊ h aʊ ɑːɹ j uː""" )
self.assertEqual(_a , """ɛ l o h aʊ a ʁ j u""" )
def snake_case__ ( self : Dict ):
__snake_case : Any = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
__snake_case : Any = """Hello how Are you"""
__snake_case : List[Any] = """hello how are you"""
__snake_case : Union[str, Any] = tokenizer(_a ).input_ids
__snake_case : List[str] = tokenizer(_a ).input_ids
self.assertEqual(_a , _a )
def snake_case__ ( self : Any ):
__snake_case : str = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
tokenizer.add_tokens(["""!""", """?"""] )
tokenizer.add_special_tokens({"""cls_token""": """$$$"""} )
# fmt: off
__snake_case : Optional[Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
__snake_case : Dict = tokenizer.batch_decode(_a )
self.assertEqual(_a , ["""k s ɾ ɾ l ɭʲ!?!? $$$""", """j ð s j ð s oːɹ $$$"""] )
    @staticmethod
    def get_from_offsets( offsets , key ):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def snake_case__ ( self : Optional[Any] ):
__snake_case : int = self.get_tokenizer(word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
__snake_case : List[Any] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
__snake_case : Union[str, Any] = tokenizer.decode(_a , output_char_offsets=_a , filter_word_delimiter_token=_a )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""char_offsets""" in outputs )
self.assertTrue(isinstance(_a , _a ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) , ["""k""", """s""", """ɾ""", """ɾ""", """|""", """ɾ""", """l""", """|""", """ɭʲ"""] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """start_offset""" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """end_offset""" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def snake_case__ ( self : Union[str, Any] ):
__snake_case : List[Any] = self.get_tokenizer(word_delimiter_token="""|""" )
def check_list_tuples_equal(_lowerCAmelCase : Tuple , _lowerCAmelCase : str ):
self.assertTrue(isinstance(_a , _a ) )
self.assertTrue(isinstance(outputs_list[0] , _a ) )
# transform list to ModelOutput
__snake_case : Union[str, Any] = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["""text"""] , outputs_batch_a["""text"""] )
def recursive_check(_lowerCAmelCase : Optional[int] , _lowerCAmelCase : int ):
if isinstance(_a , _a ):
[recursive_check(_a , _a ) for la, la in zip(_a , _a )]
self.assertEqual(_a , _a )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["""char_offsets"""] , outputs_batch_a["""char_offsets"""] )
# fmt: off
__snake_case : Union[str, Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
__snake_case : Union[str, Any] = tokenizer.batch_decode(_a , output_char_offsets=_a )
__snake_case : Union[str, Any] = [tokenizer.decode(_a , output_char_offsets=_a ) for ids in sample_ids]
check_list_tuples_equal(_a , _a )
@unittest.skip("""Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes""" )
def snake_case__ ( self : Dict ):
pass
@unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" )
def snake_case__ ( self : List[Any] ):
pass
@unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" )
def snake_case__ ( self : Tuple ):
pass
@unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" )
def snake_case__ ( self : Any ):
pass
def snake_case__ ( self : int ):
__snake_case : Optional[int] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case : Union[str, Any] = tokenizer.vocab_size
__snake_case : Any = len(_a )
self.assertNotEqual(_a , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__snake_case : Optional[Any] = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
__snake_case : int = tokenizer.add_tokens(_a )
__snake_case : int = tokenizer.vocab_size
__snake_case : List[Any] = len(_a )
self.assertNotEqual(_a , 0 )
self.assertEqual(_a , _a )
self.assertEqual(_a , len(_a ) )
self.assertEqual(_a , all_size + len(_a ) )
__snake_case : List[str] = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__snake_case : Tuple = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
__snake_case : List[str] = tokenizer.add_special_tokens(_a )
__snake_case : int = tokenizer.vocab_size
__snake_case : List[Any] = len(_a )
self.assertNotEqual(_a , 0 )
self.assertEqual(_a , _a )
self.assertEqual(_a , len(_a ) )
self.assertEqual(_a , all_size_a + len(_a ) )
__snake_case : Optional[int] = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("""The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.""" )
def snake_case__ ( self : List[Any] ):
pass
@unittest.skip("""The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.""" )
def snake_case__ ( self : Optional[Any] ):
pass
def snake_case__ ( self : Optional[int] ):
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
__snake_case : str = self.get_tokenizers(fast=_a , do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case : Tuple = ["""ð""", """ɪ""", """s""", """ɪ""", """z""", """ɐ""", """t""", """ɛ""", """k""", """s""", """t"""]
__snake_case : List[Any] = tokenizer.convert_tokens_to_string(_a )
self.assertIsInstance(output["""text"""] , _a )
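# Illustrative sketch (added; not the tokenizer's implementation): CTC-style
# char-offset extraction collapses consecutive repeats of the same id, drops
# pad ids, and records the first/last frame of each surviving character --
# the same bookkeeping the offset tests above rely on.
def _char_offsets_sketch(ids, pad_id=0):
    offsets, prev = [], None
    for i, t in enumerate(ids):
        if t == prev and t != pad_id:
            offsets[-1]["end_offset"] = i + 1
        elif t != pad_id:
            offsets.append({"char": t, "start_offset": i, "end_offset": i + 1})
        prev = t
    return offsets

assert _char_offsets_sketch([11, 5, 5, 5, 0, 5]) == [
    {"char": 11, "start_offset": 0, "end_offset": 1},
    {"char": 5, "start_offset": 1, "end_offset": 4},
    {"char": 5, "start_offset": 5, "end_offset": 6},
]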
| 721
|
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """
    Implements the hyperbolic tangent function via the logistic sigmoid:
    tanh(x) = 2 * sigmoid(2x) - 1 = (2 / (1 + e^(-2x))) - 1
    """
    return (2 / (1 + np.exp(-2 * vector))) - 1
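# Illustrative sanity check (added): the sigmoid identity used above agrees
# with numpy's built-in tanh to floating-point precision.
assert np.allclose(tangent_hyperbolic(np.array([-1.0, 0.0, 1.0])), np.tanh(np.array([-1.0, 0.0, 1.0])))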
if __name__ == "__main__":
import doctest
doctest.testmod()
| 390
| 0
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments :
    """simple docstring"""
    model_type: str = field(
        default=None , metadata={"""help""": """Model type selected in the list: """ + """, """.join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
    max_seq_length: int = field(
        default=1_28 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    doc_stride: int = field(
        default=1_28 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , )
    max_query_length: int = field(
        default=64 , metadata={
            """help""": (
                """The maximum number of tokens for the question. Questions longer than this will """
                """be truncated to this length."""
            )
        } , )
    max_answer_length: int = field(
        default=30 , metadata={
            """help""": (
                """The maximum length of an answer that can be generated. This is needed because the start """
                """and end predictions are not conditioned on one another."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    version_2_with_negative: bool = field(
        default=False , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
    null_score_diff_threshold: float = field(
        default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    n_best_size: int = field(
        default=20 , metadata={"""help""": """The total number of n-best predictions to generate."""} )
    lang_id: int = field(
        default=0 , metadata={
            """help""": (
                """language id of input for language-specific xlm models (see"""
                """ tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
            )
        } , )
    threads: int = field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} )
class Split( Enum ):
    """simple docstring"""
    train = """train"""
    dev = """dev"""
class SquadDataset( Dataset ):
    """simple docstring"""
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__( self , args: SquadDataTrainingArguments , tokenizer: PreTrainedTokenizer , limit_length: Optional[int] = None , mode: Union[str, Split] = Split.train , is_language_sensitive: Optional[bool] = False , cache_dir: Optional[str] = None , dataset_format: Optional[str] = "pt" , ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name' )
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = 'v2' if args.version_2_with_negative else 'v1'
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""" , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features['features']
                self.dataset = self.old_features.get('dataset' , None )
                self.examples = self.old_features.get('examples' , None )
                logger.info(
                    f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"""
                        ' a future run' )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features , self.dataset = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
                start = time.time()
                torch.save(
                    {'features': self.features, 'dataset': self.dataset, 'examples': self.examples} , cached_features_file , )
                # ^ This save step takes a noticeable amount of time; worth profiling to see how it can be improved.
                logger.info(
                    f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
    def __len__( self ) -> int:
        return len(self.features )
    def __getitem__( self , i: int ) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': attention_mask,
            'token_type_ids': token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
        if self.args.version_2_with_negative:
            inputs.update({'is_impossible': is_impossible} )
        if self.is_language_sensitive:
            inputs.update({'langs': (torch.ones(input_ids.shape , dtype=torch.int64 ) * self.args.lang_id)} )
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )
        return inputs
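# Illustrative sketch (added; a simplified version of the locking pattern in
# __init__ above): one process builds the cache while holding the lock; every
# other process blocks on the lock and then reads the finished file.
# `build_fn` is a made-up callable, not part of the original code.
def _cached_load_sketch(cache_path, build_fn):
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path):
            return torch.load(cache_path)
        data = build_fn()
        torch.save(data, cache_path)
        return data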
| 53
|
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig( PretrainedConfig ):
    model_type = 'esm'
    def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1026 , initializer_range=0.02 , layer_norm_eps=1E-12 , position_embedding_type="absolute" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs ):
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('No esmfold_config supplied for folding model, using default values.' )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , False ):
            raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' )
    def to_dict( self ):
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output["""esmfold_config"""] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig :
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 1_2_8
    trunk: "TrunkConfig" = None
    def __post_init__( self ):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )
    def to_dict( self ):
        output = asdict(self )
        output["""trunk"""] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig :
    num_blocks: int = 4_8
    sequence_state_dim: int = 1_0_2_4
    pairwise_state_dim: int = 1_2_8
    sequence_head_width: int = 3_2
    pairwise_head_width: int = 3_2
    position_bins: int = 3_2
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 1_2_8
    structure_module: "StructureModuleConfig" = None
    def __post_init__( self ):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'
                F""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'
                F""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'
                F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'
                F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
        if self.dropout >= 0.4:
            raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
    def to_dict( self ):
        output = asdict(self )
        output["""structure_module"""] = self.structure_module.to_dict()
        return output
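# Illustrative arithmetic (added): with the TrunkConfig defaults above,
# sequence_state_dim=1024 and sequence_head_width=32 give 32 sequence heads,
# and pairwise_state_dim=128 with pairwise_head_width=32 gives 4 pairwise
# heads, so every divisibility check in __post_init__ passes.
assert 1024 // 32 == 32 and 128 // 32 == 4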
@dataclass
class StructureModuleConfig :
    sequence_dim: int = 3_8_4
    pairwise_dim: int = 1_2_8
    ipa_dim: int = 1_6
    resnet_dim: int = 1_2_8
    num_heads_ipa: int = 1_2
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 1_0
    epsilon: float = 1e-8
    inf: float = 1e5
    def to_dict( self ):
        return asdict(self )
def get_default_vocab_list( ) -> tuple:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
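# Illustrative check (added): the default ESM vocabulary above holds 33
# symbols, with "<mask>" as the last entry at index 32.
assert len(get_default_vocab_list()) == 33 and get_default_vocab_list()[32] == "<mask>"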
| 358
| 0
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from PIL import Image\n    >>> import torch\n    >>> from diffusers import DiffusionPipeline\n    >>> from diffusers.utils import export_to_gif, load_image\n\n    >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n    >>> repo = "openai/shap-e-img2img"\n    >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n    >>> pipe = pipe.to(device)\n\n    >>> guidance_scale = 3.0\n    >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n    >>> image = load_image(image_url).convert("RGB")\n\n    >>> images = pipe(\n    ...     image,\n    ...     guidance_scale=guidance_scale,\n    ...     num_inference_steps=64,\n    ...     frame_size=256,\n    ... ).images\n\n    >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n    ```\n'
@dataclass
class ShapEPipelineOutput( BaseOutput ):
    images: Union[PIL.Image.Image, np.ndarray]
class ShapEImgaImgPipeline( DiffusionPipeline ):
    def __init__( self , prior: PriorTransformer , image_encoder: CLIPVisionModel , image_processor: CLIPImageProcessor , scheduler: HeunDiscreteScheduler , renderer: ShapERenderer , ):
        super().__init__()
        self.register_modules(
            prior=prior , image_encoder=image_encoder , image_processor=image_processor , scheduler=scheduler , renderer=renderer , )
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        device = torch.device(f"""cuda:{gpu_id}""" )
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    @property
    def _execution_device( self ):
        if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module , '''_hf_hook''' )
                and hasattr(module._hf_hook , '''execution_device''' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    def _encode_image( self , image , device , num_images_per_prompt , do_classifier_free_guidance ):
        if isinstance(image , list ) and isinstance(image[0] , torch.Tensor ):
            image = torch.cat(image , axis=0 ) if image[0].ndim == 4 else torch.stack(image , axis=0 )
        if not isinstance(image , torch.Tensor ):
            image = self.image_processor(image , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 )
        image = image.to(dtype=self.image_encoder.dtype , device=device )
        image_embeds = self.image_encoder(image )['''last_hidden_state''']
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds] )
        return image_embeds
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image: Union[PIL.Image.Image, List[PIL.Image.Image]] , num_images_per_prompt: int = 1 , num_inference_steps: int = 25 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents: Optional[torch.FloatTensor] = None , guidance_scale: float = 4.0 , frame_size: int = 64 , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        elif isinstance(image , list ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
            batch_size = len(image )
        else:
            raise ValueError(
                f"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image )}""" )
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image , device , num_images_per_prompt , do_classifier_free_guidance )
        # prior
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        # YiYi notes: for testing only, to match ldm we can directly create latents with the desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0] , num_embeddings , embedding_dim )
        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            noise_pred = self.prior(
                scaled_model_input , timestep=t , proj_embedding=image_embeds , ).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2] , dim=2 )  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred , timestep=t , sample=latents , ).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents )
        images = []
        for i, latent in enumerate(latents ):
            rendered_image = self.renderer.decode(
                latent[None, :] , device , size=frame_size , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , )
            images.append(rendered_image )
        images = torch.stack(images )
        if output_type not in ["np", "pil"]:
            raise ValueError(f"""Only the output types `pil` and `np` are supported not output_type={output_type}""" )
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image ) for image in images]
        # Offload last model to CPU
        if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images )
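# Illustrative sketch (added; mirrors the guidance step inside __call__ above):
# classifier-free guidance extrapolates from the unconditional prediction
# toward the conditional one. A guidance_scale of 1.0 reproduces the
# conditional prediction exactly.
def _cfg_sketch(noise_pred_uncond, noise_pred_cond, guidance_scale):
    return noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)

assert _cfg_sketch(0.0, 2.0, 1.0) == 2.0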
| 596
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester :
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = TFBlenderbotSmallModel(config=config ).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        head_mask = inputs_dict['''head_mask''']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1E-3 )
def prepare_blenderbot_small_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
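if is_tf_available():
    # Illustrative sketch (added; made-up token ids): the helper above derives
    # the attention mask by marking every non-pad position with 1.
    _example_ids = tf.constant([[5, 7, 0, 0]])  # assume pad_token_id == 0 for this example
    assert tf.cast(tf.math.not_equal(_example_ids, 0), tf.int8).numpy().tolist() == [[1, 1, 0, 0]]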
@require_tf
class TFBlenderbotSmallModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': TFBlenderbotSmallForConditionalGeneration,
            '''feature-extraction''': TFBlenderbotSmallModel,
            '''summarization''': TFBlenderbotSmallForConditionalGeneration,
            '''text2text-generation''': TFBlenderbotSmallForConditionalGeneration,
            '''translation''': TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFBlenderbotSmallModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlenderbotSmallConfig )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_tokenizers
@require_tf
class _A ( unittest.TestCase ):
    src_text = [
        '''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '''
        ''' i\'m going to throw up.\nand why is that?'''
    ]
    model_name = '''facebook/blenderbot_small-90M'''
    @cached_property
    def tokenizer( self ):
        return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
    @cached_property
    def model( self ):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    @slow
    def test_90_generation_from_long_input( self ):
        model_inputs = self.tokenizer(self.src_text , return_tensors='''tf''' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 596
| 1
|
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])


class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    # Promote the left child; used for the left-left imbalance.
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    node.set_height(my_max(get_height(node.get_right()), get_height(node.get_left())) + 1)
    ret.set_height(my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    # Promote the right child; used for the right-right imbalance.
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    node.set_height(my_max(get_height(node.get_right()), get_height(node.get_left())) + 1)
    ret.set_height(my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
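# Note (added): lr_rotation and rl_rotation above implement the double
# rotations. The left-right case first left-rotates the left child, reducing
# it to the left-left case, then right-rotates the node; the right-left case
# is the mirror image.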
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    node.set_height(my_max(get_height(node.get_right()), get_height(node.get_left())) + 1)
    return node


def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()


def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    root.set_height(my_max(get_height(root.get_right()), get_height(root.get_left())) + 1)
    return root
class AVLtree:
    """simple docstring"""

    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level-order traversal gives a more intuitive view of the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
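    # Illustrative bound (added): an AVL tree on n keys has height at most about
    # 1.44 * log2(n + 2); for the 10 keys inserted above that works out to 5.
    assert int(1.44 * math.log2(10 + 2)) == 5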
| 23
|


import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel

if is_vision_available():
    from PIL import Image

    from transformers import Mask2FormerImageProcessor


class Mask2FormerModelTester:
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 8, max_size=32 * 8, num_labels=4, hidden_dim=64):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        # the original `assertTrue(len(x), y)` only checked truthiness; compare the lengths instead
        self.parent.assertEqual(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim)
        )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
            comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
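
# Illustrative usage sketch (not part of the test file): running the same checkpoint
# outside the test harness. `post_process_instance_segmentation` is assumed to be
# available on Mask2FormerImageProcessor in recent transformers releases; verify the
# exact API against your installed version.
#
#   processor = Mask2FormerImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
#   model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-small-coco-instance")
#   inputs = processor(prepare_img(), return_tensors="pt")
#   with torch.no_grad():
#       outputs = model(**inputs)
#   prediction = processor.post_process_instance_segmentation(outputs, target_sizes=[(480, 640)])[0]
#   print(prediction["segmentation"].shape, len(prediction["segments_info"]))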


import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset,
        )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
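
# Illustrative sketch (not part of the script): loading the exported decoder with
# onnxruntime. Only `latent_sample` remains a real graph input -- the boolean
# `return_dict` flag is baked in at trace time. The output path is an assumption.
#
#   import numpy as np
#   import onnxruntime as ort
#
#   session = ort.InferenceSession("output/vae_decoder/model.onnx")
#   latents = np.random.randn(1, 4, 25, 25).astype(np.float32)
#   (sample,) = session.run(["sample"], {"latent_sample": latents})
#   print(sample.shape)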


from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
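

# Illustrative subclass (an assumption, loosely modeled on transformers' own CLI
# commands, not part of this file): the ABC above requires a static
# `register_subcommand` that wires an argparse subparser and a `run` that does the work.
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        hello_parser = parser.add_parser("hello", help="Print a greeting.")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello from a CLI command")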


import gc
import unittest

import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs

    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt",
        )
        text_input_ids = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(text_input_ids)
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt",
            )
            text_input_ids = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(text_input_ids)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(torch_device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)


@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
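
# Illustrative usage sketch (not part of the test file): generating and saving a
# waveform with the same pipeline. `scipy` and the 16 kHz sampling rate are assumptions.
#
#   import scipy.io.wavfile
#
#   pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm").to("cuda")
#   audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10, audio_length_in_s=5.0).audios[0]
#   scipy.io.wavfile.write("hammer.wav", rate=16000, data=audio)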


import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
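
# Illustrative usage sketch (an assumption, not part of the source file): the three
# input combinations `__call__` above dispatches on.
#
#   from PIL import Image
#
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   image = Image.open("scene.jpg")
#   text_and_image = processor(text=["a cat"], images=[image], return_tensors="pt")  # input_ids + pixel_values
#   image_only = processor(images=[image], return_tensors="pt")                      # pixel_values only
#   prompt_and_image = processor(images=[image], visual_prompt=[image], return_tensors="pt")  # + conditional_pixel_values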


from __future__ import annotations

from typing import Any


class CircularQueueLinkedList:
    """Circular FIFO queue backed by a doubly linked list of fixed capacity."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
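

# Illustrative demo (not part of the original module) of the fixed-capacity FIFO
# behaviour implemented above.
def _demo_circular_queue() -> None:
    queue = CircularQueueLinkedList(initial_capacity=3)
    queue.enqueue("a")
    queue.enqueue("b")
    assert queue.first() == "a"
    assert queue.dequeue() == "a"
    assert queue.dequeue() == "b"
    try:
        queue.dequeue()  # empty again, so this must raise
    except Exception as err:
        assert str(err) == "Empty Queue"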


import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
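
# Illustrative usage sketch (an assumption, not part of the source file):
#
#   tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#   enc = tok("first segment", "second segment")
#   # input_ids follow build_inputs_with_special_tokens: [CLS] A [SEP] B [SEP]
#   # token_type_ids are 0 over "[CLS] A [SEP]" and 1 over "B [SEP]", as computed above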
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
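
# Illustrative note (not part of the module): with the `_LazyModule` indirection above,
# importing the package is cheap and heavy submodules are resolved on first attribute
# access, e.g.:
#
#   from transformers.models.clipseg import CLIPSegProcessor             # always available
#   from transformers.models.clipseg import CLIPSegForImageSegmentation  # resolved via the torch-guarded branch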
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)


class ImageGPTImageProcessor(BaseImageProcessor):
    """
    Image processor for ImageGPT-style models: optionally resizes, rescales pixel values to
    [-1, 1], and color-quantizes pixels to the closest entry of a fixed palette ("clusters").
    """

    model_input_names = ["pixel_values"]

    def __init__(self, clusters: Optional[Union[List[List[int]], np.ndarray]] = None, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_normalize: bool = True, do_color_quantize: bool = True, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None):
        # rescale pixel values from [0, 255] to [-1, 1]
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_normalize: bool = None, do_color_quantize: Optional[bool] = None, clusters: Optional[Union[List[List[int]], np.ndarray]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters) if clusters is not None else None

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        # note the parentheses: without them, `resample is None` alone would trigger the error
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
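

# Illustrative demo (not part of the original module): nearest-cluster color
# quantization on random data, using the two helpers defined above.
def _demo_color_quantize() -> None:
    rng = np.random.default_rng(0)
    clusters = rng.uniform(-1, 1, size=(16, 3))  # a 16-entry RGB palette
    pixels = rng.uniform(-1, 1, size=(8, 8, 3))  # one 8x8 "image"
    ids = color_quantize(pixels, clusters)       # flattened (64,) palette indices
    assert ids.shape == (64,)
    assert 0 <= ids.min() and ids.max() < 16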


import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
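

# Illustrative migration sketch (an assumption, not part of the source file): both
# spellings load the same processor; only the class name (and the warning) differ.
#
#   old = SegformerFeatureExtractor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")  # warns
#   new = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")    # preferred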
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''bert-base-uncased''': 512,
'''bert-large-uncased''': 512,
'''bert-base-cased''': 512,
'''bert-large-cased''': 512,
'''bert-base-multilingual-uncased''': 512,
'''bert-base-multilingual-cased''': 512,
'''bert-base-chinese''': 512,
'''bert-base-german-cased''': 512,
'''bert-large-uncased-whole-word-masking''': 512,
'''bert-large-cased-whole-word-masking''': 512,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 512,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 512,
'''bert-base-cased-finetuned-mrpc''': 512,
'''bert-base-german-dbmdz-cased''': 512,
'''bert-base-german-dbmdz-uncased''': 512,
'''TurkuNLP/bert-base-finnish-cased-v1''': 512,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 512,
'''wietsedv/bert-base-dutch-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}


class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
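
# Illustrative consistency check (an assumption, not part of the source file): the
# fast tokenizer's pair encoding should match `create_token_type_ids_from_sequences`.
#
#   tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   enc = tok("first", "second")
#   ids_0 = tok("first", add_special_tokens=False)["input_ids"]
#   ids_1 = tok("second", add_special_tokens=False)["input_ids"]
#   assert enc["token_type_ids"] == tok.create_token_type_ids_from_sequences(ids_0, ids_1)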
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def a ( A__ : Tuple , A__ : Optional[int] , A__ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
_lowercase =UniSpeechSatForSequenceClassification.from_pretrained(A__ , config=A__ )
_lowercase =downstream_dict['projector.weight']
_lowercase =downstream_dict['projector.bias']
_lowercase =downstream_dict['model.post_net.linear.weight']
_lowercase =downstream_dict['model.post_net.linear.bias']
return model
def a ( A__ : str , A__ : Tuple , A__ : str ) -> Any:
"""simple docstring"""
_lowercase =UniSpeechSatForAudioFrameClassification.from_pretrained(A__ , config=A__ )
_lowercase =downstream_dict['model.linear.weight']
_lowercase =downstream_dict['model.linear.bias']
return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
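# Illustrative invocation of the converter above (the script name, model name, and all
# paths are placeholders, not verified checkpoints):
# python convert_unispeech_sat_s3prl_checkpoint.py \
#     --base_model_name microsoft/unispeech-sat-base \
#     --config_path ./classifier_config.json \
#     --checkpoint_path ./s3prl_downstream.ckpt \
#     --model_dump_path ./converted_model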
| 291
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class AcceleratedScheduler:
    """A wrapper around a learning rate scheduler that only steps when the optimizer(s) were stepped."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()
    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)
    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
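# Standalone sketch of the stepping rule implemented above, without accelerate itself:
# when batches are not split, the wrapped scheduler advances `num_processes` times per
# training step so its schedule stays aligned with the effective batch size.
# (The optimizer/scheduler choices below are illustrative.)
import torch

opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=1.0)
sched = torch.optim.lr_scheduler.StepLR(opt, step_size=4, gamma=0.1)
num_processes = 2  # stand-in for AcceleratorState().num_processes
for _ in range(2):  # two "training steps"
    opt.step()
    for _ in range(num_processes):  # mirrors the non-split_batches branch
        sched.step()
print(sched.get_last_lr())  # the lr decayed once, after 4 scheduler steps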
| 291
| 1
|
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))
        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))
        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)
    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))
        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))
        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)
    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)
        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))
        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))
        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))
        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
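# A minimal custom hook in the same spirit as the tests above (the TimingHook name is
# illustrative, not part of accelerate's test suite):
import time

class TimingHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        self._start = time.perf_counter()
        return args, kwargs

    def post_forward(self, module, output):
        print(f"{module.__class__.__name__} forward took {time.perf_counter() - self._start:.6f}s")
        return output

timed_model = ModelForTest()
add_hook_to_module(timed_model, TimingHook())
_ = timed_model(torch.randn(2, 3))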
| 350
|
'''simple docstring'''
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists
def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Score each row: weight 0 = lower values are better, weight 1 = higher values are better."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
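# Illustrative run with made-up data: three rows, three attributes, where weight 0 means
# "lower is better" and weight 1 means "higher is better". Each row gets its summed
# normalized score appended.
vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]  # e.g. price, range, year
print(procentual_proximity(vehicles, [0, 1, 1]))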
| 350
| 1
|
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Read raw audio bytes through an ffmpeg subprocess and decode to mono float32."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def __lowerCAmelCase ( a_ , a_ , a_ = "f32le" , ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = f"""{sampling_rate}"""
SCREAMING_SNAKE_CASE : List[Any] = '1'
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE : List[str] = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE : Optional[Any] = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
SCREAMING_SNAKE_CASE : Dict = platform.system()
if system == "Linux":
SCREAMING_SNAKE_CASE : Dict = 'alsa'
SCREAMING_SNAKE_CASE : Dict = 'default'
elif system == "Darwin":
SCREAMING_SNAKE_CASE : Dict = 'avfoundation'
SCREAMING_SNAKE_CASE : int = ':0'
elif system == "Windows":
SCREAMING_SNAKE_CASE : int = 'dshow'
SCREAMING_SNAKE_CASE : Tuple = 'default'
SCREAMING_SNAKE_CASE : str = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
SCREAMING_SNAKE_CASE : int = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
SCREAMING_SNAKE_CASE : Dict = _ffmpeg_stream(a_ , a_ )
for item in iterator:
yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Stream microphone audio as overlapping chunks with left/right strides."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Read raw bytes from `iterator` and yield chunks of `chunk_len` with left/right strides."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal: read chunks of `buflen` bytes from an ffmpeg subprocess's stdout."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
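# Hypothetical usage of ffmpeg_read (the file path is a placeholder; ffmpeg must be on PATH):
# with open("sample.flac", "rb") as f:
#     audio = ffmpeg_read(f.read(), sampling_rate=16000)
# print(audio.shape, audio.dtype)  # 1-D float32 mono waveform resampled to 16 kHz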
| 251
|
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
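    # Hypothetical follow-up: the directories written above can be reloaded with diffusers.
    # from diffusers import UNet1DModel
    # unet_hor32 = UNet1DModel.from_pretrained("hub/hopper-medium-v2/unet/hor32")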
| 251
| 1
|
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """
    0/1 knapsack: return the maximum value attainable from items[index:]
    without exceeding max_weight.
    """
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
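    # Small worked instance: weights [1, 3, 4], values [6, 10, 12], capacity 5.
    # The best subset is items 0 and 2 (weight 1 + 4 = 5, value 6 + 12 = 18).
    print(knapsack([1, 3, 4], [6, 10, 12], 3, 5, 0))  # -> 18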
| 721
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
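# Conceptual sketch of what the lazy module above achieves (heavily simplified; this is
# not transformers' actual _LazyModule implementation): attribute access triggers the
# import of the submodule that defines the requested name.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)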
| 547
| 0
|
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 524
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass
@unittest.skip(reason="Blip does not use inputs_embeds" )
def snake_case__ ( self ):
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def snake_case__ ( self ):
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def snake_case__ ( self ):
pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
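# Illustrative command to run just this tester locally (the path follows the transformers
# repository layout):
# pytest tests/models/blip/test_modeling_tf_blip_text.py -k "test_model" -x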
| 321
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
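# Hypothetical quick check (downloads the public checkpoint on first use):
# from transformers import CamembertTokenizerFast
# tok = CamembertTokenizerFast.from_pretrained("camembert-base")
# print(tok("Le camembert est délicieux !")["input_ids"])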
| 703
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()
    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
snake_case_ : str = image_processor.size['''shortest_edge''']
else:
snake_case_ : Dict = (image_processor.size['''height'''], image_processor.size['''width'''])
snake_case_ : str = Compose(
[
Lambda(lambda _a : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(_a , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(_a ):
snake_case_ : Tuple = [transforms(_a ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
snake_case_ : List[str] = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(_a )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
snake_case_ : Optional[Any] = (
ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(_a )
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
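# Illustrative launch command (the output directory and hyperparameters are placeholders):
# python run_mae.py \
#     --dataset_name cifar10 \
#     --output_dir ./vit-mae-demo \
#     --do_train --do_eval \
#     --base_learning_rate 1.5e-4 \
#     --mask_ratio 0.75 \
#     --norm_pix_loss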
| 485
| 0
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
| 49
|
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial (coefficients in increasing order of degree) at x."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method (one multiply-add per coefficient)."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
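    # Sanity check by hand: 5*10^2 + 9.3*10^3 + 7*10^4 = 500 + 9300 + 70000 = 79800.
    assert abs(evaluate_poly(poly, x) - 79800.0) < 1e-6
    assert abs(horner(poly, x) - evaluate_poly(poly, x)) < 1e-6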
| 363
| 0
|
"""simple docstring"""
from math import pow, sqrt
def _UpperCAmelCase ( *__lowerCamelCase : float ) -> bool:
_snake_case = len(__lowerCamelCase ) > 0 and all(value > 0.0 for value in values )
return result
def _UpperCAmelCase ( __lowerCamelCase : float , __lowerCamelCase : float ) -> float | ValueError:
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowerCamelCase , __lowerCamelCase )
else ValueError('''Input Error: Molar mass values must be greater than 0.''' )
)
def _UpperCAmelCase ( __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : float ) -> float | ValueError:
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else ValueError(
'''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
def _UpperCAmelCase ( __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : float ) -> float | ValueError:
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else ValueError(
'''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
def _UpperCAmelCase ( __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : float ) -> float | ValueError:
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else ValueError(
'''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
def _UpperCAmelCase ( __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : float ) -> float | ValueError:
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else ValueError(
'''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
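# A minimal usage sketch (not part of the original module): by Graham's law,
# rate_1 / rate_2 = sqrt(M_2 / M_1), so hydrogen (~2.016 g/mol) effuses roughly
# 3.98x faster than oxygen (~32.00 g/mol).
if __name__ == "__main__":
    print(round(sqrt(32.00 / 2.016) , 2 ))  # 3.98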
| 711
|
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
UpperCAmelCase__ = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] ) -> List[Any]:
_snake_case = {}
state_dict.pop('''pixel_mean''' , __lowerCamelCase )
state_dict.pop('''pixel_std''' , __lowerCamelCase )
_snake_case = R'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'''
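# Matches hypernetwork-MLP parameter names such as
# "mask_decoder.output_hypernetworks_mlps.3.layers.1.weight"; group(2) is the
# inner layer index, which is remapped to proj_in/layers.0/proj_out below.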
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_snake_case = key.replace(__lowerCamelCase , __lowerCamelCase )
if re.match(__lowerCamelCase , __lowerCamelCase ):
_snake_case = int(re.match(__lowerCamelCase , __lowerCamelCase ).group(2 ) )
if layer_nb == 0:
_snake_case = key.replace('''layers.0''' , '''proj_in''' )
elif layer_nb == 1:
_snake_case = key.replace('''layers.1''' , '''layers.0''' )
elif layer_nb == 2:
_snake_case = key.replace('''layers.2''' , '''proj_out''' )
_snake_case = value
_snake_case = model_state_dict[
'''prompt_encoder.shared_embedding.positional_embedding'''
]
return model_state_dict
def _UpperCAmelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int]="ybelkada/segment-anything" ) -> List[str]:
_snake_case = hf_hub_download(__lowerCamelCase , f'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
_snake_case = SamConfig()
elif "sam_vit_l" in model_name:
_snake_case = SamVisionConfig(
hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
_snake_case = SamConfig(
vision_config=__lowerCamelCase , )
elif "sam_vit_h" in model_name:
_snake_case = SamVisionConfig(
hidden_size=12_80 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
_snake_case = SamConfig(
vision_config=__lowerCamelCase , )
_snake_case = torch.load(__lowerCamelCase , map_location='''cpu''' )
_snake_case = replace_keys(__lowerCamelCase )
_snake_case = SamImageProcessor()
_snake_case = SamProcessor(image_processor=__lowerCamelCase )
_snake_case = SamModel(__lowerCamelCase )
hf_model.load_state_dict(__lowerCamelCase )
_snake_case = hf_model.to('''cuda''' )
_snake_case = '''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'''
_snake_case = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ).convert('''RGB''' )
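# a single 2D point prompt at pixel (x, y) = (400, 650) with a positive label (1)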
_snake_case = [[[4_00, 6_50]]]
_snake_case = [[1]]
_snake_case = processor(images=np.array(__lowerCamelCase ) , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
_snake_case = hf_model(**__lowerCamelCase )
_snake_case = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_890_251_159_668
_snake_case = processor(
images=np.array(__lowerCamelCase ) , input_points=__lowerCamelCase , input_labels=__lowerCamelCase , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
_snake_case = hf_model(**__lowerCamelCase )
_snake_case = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_712_603_092_193_604
_snake_case = ((75, 2_75, 17_25, 8_50),)
_snake_case = processor(images=np.array(__lowerCamelCase ) , input_boxes=__lowerCamelCase , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
_snake_case = hf_model(**__lowerCamelCase )
_snake_case = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8_686_015_605_926_514
# Test with 2 points and 1 image.
_snake_case = [[[4_00, 6_50], [8_00, 6_50]]]
_snake_case = [[1, 1]]
_snake_case = processor(
images=np.array(__lowerCamelCase ) , input_points=__lowerCamelCase , input_labels=__lowerCamelCase , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
_snake_case = hf_model(**__lowerCamelCase )
_snake_case = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_936_047_792_434_692
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
UpperCAmelCase__ = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
    help='Name of the original SAM checkpoint to convert.',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
    type=str,
    help='Hub repo id hosting the original `segment-anything` checkpoints.',
)
UpperCAmelCase__ = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 430
| 0
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Tuple ,A_ : Any ,A_ : int=13 ,A_ : str=7 ,A_ : Tuple=True ,A_ : str=True ,A_ : str=False ,A_ : List[str]=True ,A_ : str=99 ,A_ : str=32 ,A_ : Optional[int]=5 ,A_ : Optional[Any]=4 ,A_ : str=37 ,A_ : Optional[Any]="gelu" ,A_ : Union[str, Any]=0.1 ,A_ : Any=0.1 ,A_ : Optional[Any]=512 ,A_ : str=16 ,A_ : int=2 ,A_ : Optional[Any]=0.02 ,A_ : str=3 ,A_ : str=4 ,A_ : List[str]=None ,) -> str:
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_input_mask
A = use_token_type_ids
A = use_labels
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = type_sequence_label_size
A = initializer_range
A = num_labels
A = num_choices
A = scope
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,self.num_choices )
A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A_ ,initializer_range=self.initializer_range ,)
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : Optional[int] ,A_ : Any ,A_ : Optional[Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ) -> List[Any]:
A = LlamaModel(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : int ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : Dict ,) -> List[str]:
A = True
A = LlamaModel(A_ )
model.to(A_ )
model.eval()
A = model(
A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,)
A = model(
A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,)
A = model(A_ ,attention_mask=A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict ,A_ : Dict ,A_ : Tuple ,A_ : Tuple ,A_ : Dict ,) -> Union[str, Any]:
A = LlamaForCausalLM(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Dict ,A_ : Any ,A_ : int ,A_ : List[str] ,A_ : Tuple ,A_ : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : int ,) -> List[Any]:
A = True
A = True
A = LlamaForCausalLM(config=A_ )
model.to(A_ )
model.eval()
# first forward pass
A = model(
A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,use_cache=A_ ,)
A = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A = ids_tensor((self.batch_size, 3) ,config.vocab_size )
A = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
A = torch.cat([input_ids, next_tokens] ,dim=-1 )
A = torch.cat([input_mask, next_mask] ,dim=-1 )
A = model(
A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,output_hidden_states=A_ ,)['hidden_states'][0]
A = model(
A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,past_key_values=A_ ,output_hidden_states=A_ ,)['hidden_states'][0]
# select random slice
A = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
A = output_from_no_past[:, -3:, random_slice_idx].detach()
A = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-3 ) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
A = self.prepare_config_and_inputs()
A , A , A , A , A , A , A = config_and_inputs
A = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_lowerCamelCase: List[Any] = (LlamaForCausalLM,) if is_torch_available() else ()
_lowerCamelCase: Any = (
{
'''feature-extraction''': LlamaModel,
'''text-classification''': LlamaForSequenceClassification,
'''text-generation''': LlamaForCausalLM,
'''zero-shot''': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase: int = False
_lowerCamelCase: List[str] = False
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A = LlamaModelTester(self )
A = ConfigTester(self ,config_class=A_ ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
A = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A = type
self.model_tester.create_and_check_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = 3
A = input_dict['input_ids']
A = input_ids.ne(1 ).to(A_ )
A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A = LlamaForSequenceClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,labels=A_ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = 3
A = 'single_label_classification'
A = input_dict['input_ids']
A = input_ids.ne(1 ).to(A_ )
A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A = LlamaForSequenceClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,labels=A_ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = 3
A = 'multi_label_classification'
A = input_dict['input_ids']
A = input_ids.ne(1 ).to(A_ )
A = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
A = LlamaForSequenceClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,labels=A_ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ) -> str:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = ids_tensor([1, 10] ,config.vocab_size )
A = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
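# the long input exceeds the original max_position_embeddings, which is what
# forces the scaled-RoPE code path to actually extrapolate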
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A = LlamaModel(A_ )
original_model.to(A_ )
original_model.eval()
A = original_model(A_ ).last_hidden_state
A = original_model(A_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A = {'type': scaling_type, 'factor': 10.0}
A = LlamaModel(A_ )
scaled_model.to(A_ )
scaled_model.eval()
A = scaled_model(A_ ).last_hidden_state
A = scaled_model(A_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A_ ,A_ ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A_ ,A_ ,atol=1e-5 ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
A = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' )
A = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
A = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] )
torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
A = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' )
A = model(torch.tensor(A_ ) )
# Expected mean on dim = -1
A = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] )
torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _SCREAMING_SNAKE_CASE ( self : int ) -> str:
A = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' )
A = model(torch.tensor(A_ ) )
# Expected mean on dim = -1
A = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] )
torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 )
@unittest.skip(
'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also, this will be a `too_slow` test' )
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
A = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' )
A = model(torch.tensor(A_ ) )
A = torch.tensor(
[[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 )
# fmt: off
A = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip('Model is currently gated' )
@slow
def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
A = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
A = 'Simply put, the theory of relativity states that '
A = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
A = tokenizer.encode(A_ ,return_tensors='pt' )
A = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=A_ )
# greedy generation outputs
A = model.generate(A_ ,max_new_tokens=64 ,top_p=A_ ,temperature=1 ,do_sample=A_ )
A = tokenizer.decode(generated_ids[0] ,skip_special_tokens=A_ )
self.assertEqual(A_ ,A_ )
| 91
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
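# Lazy-import pattern: `_import_structure` only names the public objects, and the
# `_LazyModule` at the bottom defers the heavy torch imports until first attribute access.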
_lowercase = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 91
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
def _lowerCAmelCase ( __lowerCamelCase:int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__lowerCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
lowercase = [num for num in range(3, 100001, 2) if not is_prime(num)]
def _lowerCAmelCase ( __lowerCamelCase:int ):
'''simple docstring'''
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise ValueError("n must be an integer" )
if n <= 0:
raise ValueError("n must be >= 0" )
__magic_name__ = []
for num in range(len(__lowerCamelCase ) ):
__magic_name__ = 0
while 2 * i * i <= odd_composites[num]:
__magic_name__ = odd_composites[num] - 2 * i * i
if is_prime(__lowerCamelCase ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(__lowerCamelCase ) == n:
return list_nums
return []
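# Goldbach's "other" conjecture claims every odd composite number is the sum of a
# prime and twice a square; compute_nums collects the first n odd composites for
# which no such decomposition exists (the smallest, returned by solution(), is 5777).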
def _lowerCAmelCase ( ):
'''simple docstring'''
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 468
|
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( snake_case_ ):
UpperCAmelCase__ = ['''image_processor''', '''tokenizer''']
UpperCAmelCase__ = '''BlipImageProcessor'''
UpperCAmelCase__ = '''AutoTokenizer'''
def __init__( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : int ) -> str:
__magic_name__ = False
super().__init__(__lowerCamelCase , __lowerCamelCase )
__magic_name__ = self.image_processor
def __call__( self : List[Any] , __lowerCamelCase : ImageInput = None , __lowerCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase : bool = True , __lowerCamelCase : Union[bool, str, PaddingStrategy] = False , __lowerCamelCase : Union[bool, str, TruncationStrategy] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : int = 0 , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[str, TensorType]] = None , **__lowerCamelCase : Optional[int] , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
__magic_name__ = self.tokenizer
__magic_name__ = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
return text_encoding
# add pixel_values
__magic_name__ = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase )
if text is not None:
__magic_name__ = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
else:
__magic_name__ = None
if text_encoding is not None:
encoding_image_processor.update(__lowerCamelCase )
return encoding_image_processor
def _snake_case ( self : Union[str, Any] , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Dict ) -> Dict:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def _snake_case ( self : str , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Tuple ) -> Optional[Any]:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _snake_case ( self : List[str] ) -> Optional[Any]:
__magic_name__ = self.tokenizer.model_input_names
__magic_name__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 468
| 1
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
_A : List[str] = logging.get_logger(__name__)
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ : int = ["""pixel_values"""]
def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BILINEAR , A_ = True , A_ = 1 / 2_55 , A_ = True , A_ = None , A_ = True , **A_ , ):
'''simple docstring'''
super().__init__(**A_ )
SCREAMING_SNAKE_CASE__ = size if size is not None else {'''shortest_edge''': 2_24}
SCREAMING_SNAKE_CASE__ = get_size_dict(A_ , default_to_square=A_ )
SCREAMING_SNAKE_CASE__ = crop_size if crop_size is not None else {'''height''': 2_56, '''width''': 2_56}
SCREAMING_SNAKE_CASE__ = get_size_dict(A_ , param_name='''crop_size''' )
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = resample
SCREAMING_SNAKE_CASE__ = do_rescale
SCREAMING_SNAKE_CASE__ = rescale_factor
SCREAMING_SNAKE_CASE__ = do_center_crop
SCREAMING_SNAKE_CASE__ = crop_size
SCREAMING_SNAKE_CASE__ = do_flip_channel_order
def lowercase_ ( self , A_ , A_ , A_ = PIL.Image.BILINEAR , A_ = None , **A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = get_size_dict(A_ , default_to_square=A_ )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE__ = get_resize_output_image_size(A_ , size=size['''shortest_edge'''] , default_to_square=A_ )
return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )
def lowercase_ ( self , A_ , A_ , A_ = None , **A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
return center_crop(A_ , size=(size['''height'''], size['''width''']) , data_format=A_ , **A_ )
def lowercase_ ( self , A_ , A_ , A_ = None , **A_ , ):
'''simple docstring'''
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def lowercase_ ( self , A_ , A_ = None ):
'''simple docstring'''
return flip_channel_order(A_ , data_format=A_ )
def lowercase_ ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
SCREAMING_SNAKE_CASE__ = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ = get_size_dict(A_ , default_to_square=A_ )
SCREAMING_SNAKE_CASE__ = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ = get_size_dict(A_ , param_name='''crop_size''' )
SCREAMING_SNAKE_CASE__ = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ = [to_numpy_array(A_ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ = [self.center_crop(image=A_ , size=A_ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ = [self.rescale(image=A_ , scale=A_ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
SCREAMING_SNAKE_CASE__ = [self.flip_channel_order(image=A_ ) for image in images]
SCREAMING_SNAKE_CASE__ = [to_channel_dimension_format(A_ , A_ ) for image in images]
SCREAMING_SNAKE_CASE__ = {'''pixel_values''': images}
return BatchFeature(data=A_ , tensor_type=A_ )
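# Converts raw segmentation logits into one label map per image, optionally
# resizing each map to the matching (height, width) entry in target_sizes.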
def lowercase_ ( self , A_ , A_ = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A_ ) != len(A_ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(A_ ):
SCREAMING_SNAKE_CASE__ = target_sizes.numpy()
SCREAMING_SNAKE_CASE__ = []
for idx in range(len(A_ ) ):
SCREAMING_SNAKE_CASE__ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=A_ )
SCREAMING_SNAKE_CASE__ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A_ )
else:
SCREAMING_SNAKE_CASE__ = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 100
|
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase = "cpu" , UpperCamelCase = None ) -> None:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = torch.load(UpperCamelCase , map_location=UpperCamelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(UpperCamelCase , torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
__UpperCAmelCase : Optional[Any] = v.half()
if save_path is None: # overwrite src_path
__UpperCAmelCase : str = src_path
torch.save(UpperCamelCase , UpperCamelCase )
if __name__ == "__main__":
fire.Fire(convert)
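# Example invocation (hypothetical filename; `fire` maps the function arguments
# to CLI arguments):
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin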
| 77
| 0
|
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
def decorator(a__ ):
_lowerCAmelCase =getattr(a__ , 'handle_key' , [] )
handle += [key]
setattr(a__ , 'handle_key' , a__ )
return func
return decorator
def UpperCamelCase__ ( *a__ ):
'''simple docstring'''
def decorator(a__ ):
_lowerCAmelCase =getattr(a__ , 'handle_key' , [] )
handle += keys
setattr(a__ , 'handle_key' , a__ )
return func
return decorator
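# The two decorators above tag handler methods with a `handle_key` attribute; the
# metaclass below gathers those tags into a per-class `key_handler` dispatch table.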
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def __new__( cls , __A , __A , __A ) -> Tuple:
_lowerCAmelCase =super().__new__(cls , __A , __A , __A )
if not hasattr(__A , 'key_handler' ):
setattr(__A , 'key_handler' , {} )
setattr(__A , 'handle_input' , KeyHandler.handle_input )
for value in attrs.values():
_lowerCAmelCase =getattr(__A , 'handle_key' , [] )
for key in handled_keys:
_lowerCAmelCase =value
return new_cls
@staticmethod
def UpperCamelCase__ ( cls ) -> Tuple:
_lowerCAmelCase =get_character()
if char != KEYMAP["undefined"]:
_lowerCAmelCase =ord(__A )
_lowerCAmelCase =cls.key_handler.get(__A )
if handler:
_lowerCAmelCase =char
return handler(cls )
else:
return None
def UpperCamelCase__ ( cls ):
'''simple docstring'''
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 58
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : List[str] = 'data2vec-text'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.02 , __A=1E-12 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=None , **__A , ) -> List[Any]:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =use_cache
_lowerCAmelCase =classifier_dropout
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowerCAmelCase ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 58
| 1
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ : Tuple = logging.get_logger(__name__)
a_ : Dict = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class _snake_case ( A__ ):
_lowercase : List[str] = '''conditional_detr'''
_lowercase : Optional[Any] = ['''past_key_values''']
_lowercase : Optional[Any] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , a=True , a=None , a=3 , a=300 , a=6 , a=2048 , a=8 , a=6 , a=2048 , a=8 , a=0.0 , a=0.0 , a=True , a="relu" , a=256 , a=0.1 , a=0.0 , a=0.0 , a=0.02 , a=1.0 , a=False , a="sine" , a="resnet50" , a=True , a=False , a=2 , a=5 , a=2 , a=1 , a=1 , a=2 , a=5 , a=2 , a=0.25 , **a , ) -> List[str]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
SCREAMING_SNAKE_CASE = CONFIG_MAPPING['resnet'](out_features=['stage4'])
elif isinstance(a , a):
SCREAMING_SNAKE_CASE = backbone_config.get('model_type')
SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE = config_class.from_dict(a)
SCREAMING_SNAKE_CASE = use_timm_backbone
SCREAMING_SNAKE_CASE = backbone_config
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = num_queries
SCREAMING_SNAKE_CASE = d_model
SCREAMING_SNAKE_CASE = encoder_ffn_dim
SCREAMING_SNAKE_CASE = encoder_layers
SCREAMING_SNAKE_CASE = encoder_attention_heads
SCREAMING_SNAKE_CASE = decoder_ffn_dim
SCREAMING_SNAKE_CASE = decoder_layers
SCREAMING_SNAKE_CASE = decoder_attention_heads
SCREAMING_SNAKE_CASE = dropout
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = activation_dropout
SCREAMING_SNAKE_CASE = activation_function
SCREAMING_SNAKE_CASE = init_std
SCREAMING_SNAKE_CASE = init_xavier_std
SCREAMING_SNAKE_CASE = encoder_layerdrop
SCREAMING_SNAKE_CASE = decoder_layerdrop
SCREAMING_SNAKE_CASE = encoder_layers
SCREAMING_SNAKE_CASE = auxiliary_loss
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = backbone
SCREAMING_SNAKE_CASE = use_pretrained_backbone
SCREAMING_SNAKE_CASE = dilation
# Hungarian matcher
SCREAMING_SNAKE_CASE = class_cost
SCREAMING_SNAKE_CASE = bbox_cost
SCREAMING_SNAKE_CASE = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE = mask_loss_coefficient
SCREAMING_SNAKE_CASE = dice_loss_coefficient
SCREAMING_SNAKE_CASE = cls_loss_coefficient
SCREAMING_SNAKE_CASE = bbox_loss_coefficient
SCREAMING_SNAKE_CASE = giou_loss_coefficient
SCREAMING_SNAKE_CASE = focal_alpha
super().__init__(is_encoder_decoder=a , **a)
@property
def SCREAMING_SNAKE_CASE__ ( self) -> int:
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE__ ( self) -> int:
return self.d_model
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__)
if self.backbone_config is not None:
SCREAMING_SNAKE_CASE = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
class _snake_case ( A__ ):
_lowercase : int = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE__ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
])
@property
def SCREAMING_SNAKE_CASE__ ( self) -> float:
return 1E-5
@property
def SCREAMING_SNAKE_CASE__ ( self) -> int:
return 12
| 73
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_lowerCAmelCase = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_lowerCAmelCase = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
_lowerCAmelCase = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
"""simple docstring"""
def snake_case_( self )-> Dict:
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def snake_case_( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = False , )-> List[Any]:
lowercase__ = len(references[0] )
if any(len(_lowerCamelCase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
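# transpose references from one list per prediction to one stream per reference
# position, which is the layout sacrebleu expects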
lowercase__ = [[refs[i] for refs in references] for i in range(_lowerCamelCase )]
lowercase__ = TER(
normalized=_lowerCamelCase , no_punct=_lowerCamelCase , asian_support=_lowerCamelCase , case_sensitive=_lowerCamelCase , )
lowercase__ = sb_ter.corpus_score(_lowerCamelCase , _lowerCamelCase )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 161
| 0
|
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
a_ : List[str] = """src/transformers"""
# Matches is_xxx_available()
a_ : Union[str, Any] = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
a_ : List[str] = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
a_ : List[Any] = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
a_ : Optional[Any] = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
a_ : List[str] = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
a_ : List[str] = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
a_ : str = re.compile(R"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
a_ : List[Any] = re.compile(R"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
a_ : Dict = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
a_ : Tuple = re.compile(R"""^\s*try:""")
# Catches a line with else:
a_ : str = re.compile(R"""^\s*else:""")
def __snake_case ( UpperCAmelCase_ : int ):
if _re_test_backend.search(UpperCAmelCase_ ) is None:
return None
lowerCamelCase_ = [b[0] for b in _re_backend.findall(UpperCAmelCase_ )]
backends.sort()
return "_and_".join(UpperCAmelCase_ )
def __snake_case ( UpperCAmelCase_ : Any ):
with open(UpperCAmelCase_ , "r" , encoding="utf-8" , newline="\n" ) as f:
lowerCamelCase_ = f.readlines()
lowerCamelCase_ = 0
while line_index < len(UpperCAmelCase_ ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(UpperCAmelCase_ ):
return None
# First grab the objects without a specific backend in _import_structure
lowerCamelCase_ = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
lowerCamelCase_ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(UpperCAmelCase_ ):
lowerCamelCase_ = _re_one_line_import_struct.search(UpperCAmelCase_ ).groups()[0]
lowerCamelCase_ = re.findall(r"\[([^\]]+)\]" , UpperCAmelCase_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
lowerCamelCase_ = _re_import_struct_key_value.search(UpperCAmelCase_ )
if single_line_import_search is not None:
lowerCamelCase_ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(UpperCAmelCase_ ) > 0]
objects.extend(UpperCAmelCase_ )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
lowerCamelCase_ = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowerCamelCase_ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCamelCase_ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCamelCase_ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
lowerCamelCase_ = lines[line_index]
if _re_import_struct_add_one.search(UpperCAmelCase_ ) is not None:
objects.append(_re_import_struct_add_one.search(UpperCAmelCase_ ).groups()[0] )
elif _re_import_struct_add_many.search(UpperCAmelCase_ ) is not None:
lowerCamelCase_ = _re_import_struct_add_many.search(UpperCAmelCase_ ).groups()[0].split(", " )
lowerCamelCase_ = [obj[1:-1] for obj in imports if len(UpperCAmelCase_ ) > 0]
objects.extend(UpperCAmelCase_ )
elif _re_between_brackets.search(UpperCAmelCase_ ) is not None:
lowerCamelCase_ = _re_between_brackets.search(UpperCAmelCase_ ).groups()[0].split(", " )
lowerCamelCase_ = [obj[1:-1] for obj in imports if len(UpperCAmelCase_ ) > 0]
objects.extend(UpperCAmelCase_ )
elif _re_quote_object.search(UpperCAmelCase_ ) is not None:
objects.append(_re_quote_object.search(UpperCAmelCase_ ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
lowerCamelCase_ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowerCamelCase_ = []
while (
line_index < len(UpperCAmelCase_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
lowerCamelCase_ = lines[line_index]
lowerCamelCase_ = _re_import.search(UpperCAmelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowerCamelCase_ = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(UpperCAmelCase_ ):
# If the line is an if is_backend_available, we grab all objects associated.
lowerCamelCase_ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCamelCase_ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCamelCase_ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
lowerCamelCase_ = lines[line_index]
lowerCamelCase_ = _re_import.search(UpperCAmelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowerCamelCase_ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
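# parse_init returns two dicts keyed by backend name (or "none"): the objects
# declared in `_import_structure` and those imported under TYPE_CHECKING. The
# checker below verifies both halves declare exactly the same objects.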
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
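
# Illustrative only (not part of the original utility): a tiny self-check of
# analyze_results on hand-built dicts with the shape parse_init returns.
def _demo_analyze_results():
    import_dict = {"none": ["PretrainedConfig"], "torch": ["BertModel"]}
    type_hints = {"none": ["PretrainedConfig"], "torch": ["BertModel", "BertLayer"]}
    # "BertLayer" is only type-hinted, so analyze_results reports one difference.
    return analyze_results(import_dict, type_hints)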
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
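
# For context, a minimal sketch of the helper regexes the parser above relies
# on (`_re_try`, `_re_else`, `_re_import`, and `find_backend`). The names come
# from the code above; the exact patterns live earlier in the real
# `utils/check_inits.py` and may differ, so treat these as assumptions:
#
#     _re_try = re.compile(r"^\s*try:")
#     _re_else = re.compile(r"^\s*else:")
#     _re_import = re.compile(r"\s+from\s+\S+\s+import\s+(.+)$")
#     _re_backend = re.compile(r"is\_([a-z_]*)_available")
#
#     def find_backend(line):
#         """Return "backend1_and_backend2..." if `line` tests backend availability."""
#         backends = _re_backend.findall(line)
#         return "_and_".join(backends) if backends else None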
| 707
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    """Configuration class to store the configuration of a UniSpeech model."""

    model_type = "unispeech"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
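
# Quick illustration (not part of the original file): with the default
# conv_stride above, one frame of logits corresponds to 320 input samples,
# since functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320.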
| 445
| 0
|
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class _A :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="None" , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = relative_attention
_UpperCAmelCase = position_biased_input
_UpperCAmelCase = pos_att_type
_UpperCAmelCase = scope
def UpperCAmelCase ( self ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_lowercase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = TFDebertaVaModel(config=_lowercase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(_lowercase )
_UpperCAmelCase = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = TFDebertaVaForMaskedLM(config=_lowercase )
_UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCAmelCase = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFDebertaVaForSequenceClassification(config=_lowercase )
_UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCAmelCase = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFDebertaVaForTokenClassification(config=_lowercase )
_UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCAmelCase = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = TFDebertaVaForQuestionAnswering(config=_lowercase )
_UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCAmelCase = model(_lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _A ( a_ , a_ , unittest.TestCase ):
__a = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
__a = (
{
"""feature-extraction""": TFDebertaVaModel,
"""fill-mask""": TFDebertaVaForMaskedLM,
"""question-answering""": TFDebertaVaForQuestionAnswering,
"""text-classification""": TFDebertaVaForSequenceClassification,
"""token-classification""": TFDebertaVaForTokenClassification,
"""zero-shot""": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
__a = False
__a = False
def UpperCAmelCase ( self ):
_UpperCAmelCase = TFDebertaVaModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
def UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowercase )
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowercase )
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowercase )
def UpperCAmelCase ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowercase )
@slow
def UpperCAmelCase ( self ):
_UpperCAmelCase = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
self.assertIsNotNone(_lowercase )
@require_tf
class _A ( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def UpperCAmelCase ( self ):
pass
@slow
def UpperCAmelCase ( self ):
_UpperCAmelCase = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
_UpperCAmelCase = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
_UpperCAmelCase = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_UpperCAmelCase = model(_lowercase , attention_mask=_lowercase )[0]
_UpperCAmelCase = tf.constant(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , _lowercase , atol=1e-4 )
| 518
|
"""simple docstring"""
def move_tower(height, from_pole, to_pole, with_pole):
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")
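
# Added for reference (not in the original snippet): the recursion above
# always performs 2**height - 1 single-disk moves.
def expected_move_count(height: int) -> int:
    return 2**height - 1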
if __name__ == "__main__":
main()
| 434
| 0
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class a :
"""simple docstring"""
def __init__( self : Optional[int] , snake_case : Tuple , snake_case : str=13 , snake_case : Tuple=7 , snake_case : Dict=True , snake_case : List[str]=True , snake_case : int=False , snake_case : Dict=True , snake_case : str=99 , snake_case : List[str]=32 , snake_case : List[str]=5 , snake_case : List[str]=4 , snake_case : List[Any]=37 , snake_case : int="gelu" , snake_case : Union[str, Any]=0.1 , snake_case : Union[str, Any]=0.1 , snake_case : List[Any]=512 , snake_case : Union[str, Any]=16 , snake_case : Dict=2 , snake_case : Optional[int]=0.02 , snake_case : Any=3 , snake_case : List[Any]=4 , snake_case : Optional[int]=None , ) -> int:
__UpperCAmelCase : Dict = parent
__UpperCAmelCase : int = batch_size
__UpperCAmelCase : int = seq_length
__UpperCAmelCase : Optional[int] = is_training
__UpperCAmelCase : Dict = use_input_mask
__UpperCAmelCase : List[Any] = use_token_type_ids
__UpperCAmelCase : str = use_labels
__UpperCAmelCase : Optional[int] = vocab_size
__UpperCAmelCase : Optional[int] = hidden_size
__UpperCAmelCase : Dict = num_hidden_layers
__UpperCAmelCase : List[str] = num_attention_heads
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : Dict = hidden_dropout_prob
__UpperCAmelCase : str = attention_probs_dropout_prob
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : Dict = type_vocab_size
__UpperCAmelCase : List[Any] = type_sequence_label_size
__UpperCAmelCase : int = initializer_range
__UpperCAmelCase : Dict = num_labels
__UpperCAmelCase : Tuple = num_choices
__UpperCAmelCase : List[Any] = scope
def lowerCamelCase__ ( self : str ) -> int:
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : Union[str, Any] = None
if self.use_input_mask:
__UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Optional[int] = None
if self.use_token_type_ids:
__UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : Optional[Any] = None
__UpperCAmelCase : Any = None
if self.use_labels:
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : Optional[int] ) -> Union[str, Any]:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , use_stable_embedding=snake_case , )
def lowerCamelCase__ ( self : Tuple , snake_case : Optional[Any] , snake_case : str , snake_case : Union[str, Any] , snake_case : Any , snake_case : int , snake_case : Optional[Any] , snake_case : Tuple ) -> int:
__UpperCAmelCase : Optional[int] = OpenLlamaModel(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Any = model(snake_case , attention_mask=snake_case )
__UpperCAmelCase : Dict = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Optional[Any] , snake_case : Dict , snake_case : List[Any] , snake_case : Optional[int] , snake_case : Dict , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : List[str] , snake_case : Optional[Any] , snake_case : Dict , ) -> Any:
__UpperCAmelCase : List[str] = True
__UpperCAmelCase : List[str] = OpenLlamaModel(snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Optional[Any] = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )
__UpperCAmelCase : Tuple = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , )
__UpperCAmelCase : Optional[Any] = model(snake_case , attention_mask=snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Optional[int] , snake_case : Optional[int] , snake_case : List[Any] , snake_case : Any , snake_case : Union[str, Any] , snake_case : Tuple , snake_case : Optional[Any] , snake_case : int , snake_case : str , snake_case : str , ) -> Tuple:
__UpperCAmelCase : str = OpenLlamaForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Optional[Any] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : Optional[Any] , snake_case : List[str] , snake_case : int , snake_case : List[Any] , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : List[Any] , snake_case : Any , snake_case : str , snake_case : Optional[int] , ) -> Optional[Any]:
__UpperCAmelCase : int = True
__UpperCAmelCase : Any = True
__UpperCAmelCase : Union[str, Any] = OpenLlamaForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
# first forward pass
__UpperCAmelCase : str = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , use_cache=snake_case , )
__UpperCAmelCase : Optional[int] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
__UpperCAmelCase : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
__UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCAmelCase : List[str] = torch.cat([input_mask, next_mask] , dim=-1 )
__UpperCAmelCase : int = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , output_hidden_states=snake_case , )['''hidden_states'''][0]
__UpperCAmelCase : Any = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , past_key_values=snake_case , output_hidden_states=snake_case , )['''hidden_states'''][0]
# select random slice
__UpperCAmelCase : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCAmelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1E-3 ) )
def lowerCamelCase__ ( self : int ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a ( _a , _a , _a , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE : List[Any] = (OpenLlamaForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Dict = (
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Tuple = False
SCREAMING_SNAKE_CASE : Optional[Any] = False
def lowerCamelCase__ ( self : Dict ) -> Optional[int]:
__UpperCAmelCase : Tuple = OpenLlamaModelTester(self )
__UpperCAmelCase : str = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def lowerCamelCase__ ( self : Dict ) -> Tuple:
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : List[Any] ) -> Dict:
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : Tuple = type
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase__ ( self : Tuple ) -> str:
__UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[str] = 3
__UpperCAmelCase : Optional[Any] = input_dict['''input_ids''']
__UpperCAmelCase : int = input_ids.ne(1 ).to(snake_case )
__UpperCAmelCase : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__UpperCAmelCase : Optional[int] = OpenLlamaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Optional[int] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase__ ( self : Dict ) -> Optional[Any]:
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Tuple = 3
__UpperCAmelCase : Union[str, Any] = '''single_label_classification'''
__UpperCAmelCase : Optional[Any] = input_dict['''input_ids''']
__UpperCAmelCase : Union[str, Any] = input_ids.ne(1 ).to(snake_case )
__UpperCAmelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__UpperCAmelCase : Optional[int] = OpenLlamaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Dict = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase__ ( self : Any ) -> int:
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Any = 3
__UpperCAmelCase : str = '''multi_label_classification'''
__UpperCAmelCase : str = input_dict['''input_ids''']
__UpperCAmelCase : Optional[Any] = input_ids.ne(1 ).to(snake_case )
__UpperCAmelCase : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__UpperCAmelCase : List[Any] = OpenLlamaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : int = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def lowerCamelCase__ ( self : Any , snake_case : Any ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[str] = ids_tensor([1, 10] , config.vocab_size )
__UpperCAmelCase : Any = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__UpperCAmelCase : List[str] = OpenLlamaModel(snake_case )
original_model.to(snake_case )
original_model.eval()
__UpperCAmelCase : List[str] = original_model(snake_case ).last_hidden_state
__UpperCAmelCase : str = original_model(snake_case ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
__UpperCAmelCase : List[Any] = OpenLlamaModel(snake_case )
scaled_model.to(snake_case )
scaled_model.eval()
__UpperCAmelCase : List[str] = scaled_model(snake_case ).last_hidden_state
__UpperCAmelCase : Optional[Any] = scaled_model(snake_case ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
| 266
|
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))
    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
| 266
| 1
|
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
SCREAMING_SNAKE_CASE = ComputeEnvironment.AMAZON_SAGEMAKER
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = 'ml.p3.2xlarge'
SCREAMING_SNAKE_CASE = 'accelerate_sagemaker_execution_role'
SCREAMING_SNAKE_CASE = 'hf-sm'
SCREAMING_SNAKE_CASE = 'us-east-1'
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 'accelerate-sagemaker-1'
SCREAMING_SNAKE_CASE = '1.6'
SCREAMING_SNAKE_CASE = '4.4'
SCREAMING_SNAKE_CASE = 'train.py'
    success_training_script_args = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
    fail_training_script_args = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
class MockLaunchConfigTester(unittest.TestCase):
    def test_args_convert(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
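
        # Expected shape of the converted dict, inferred from the assertions
        # above: {"model_name_or_path": "bert", "do_train": False,
        # "epochs": 3, "learning_rate": 5e-5, "max_steps": 50.5}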
| 242
|
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n, found by trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
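    # Sanity checks added for illustration (13195 = 5 * 7 * 13 * 29; the
    # Project Euler #3 answer for the default input is 6857):
    assert solution(13195) == 29
    assert solution() == 6857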
| 242
| 1
|
from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
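
# The idea behind _LazyModule, sketched for intuition only (the real
# implementation in transformers.utils also proxies submodules, __dir__, and
# pickling; treat this as an assumption-laden simplification):
#
#     import importlib
#     import types
#
#     class _LazyModuleSketch(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for submodule, objects in self._import_structure.items():
#                 if attr in objects:
#                     module = importlib.import_module(f"{self.__name__}.{submodule}")
#                     return getattr(module, attr)
#             raise AttributeError(attr)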
| 710
|
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
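
    # Note (inferred from the assertions above, not from the library docs):
    # `e` and `d` are the numbers of encoder and decoder layers copied into
    # the student, and `d=None` keeps the teacher's decoder depth.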
| 629
| 0
|
'''simple docstring'''
def count_inversions_bf(arr):
    """Count inversions by brute force in O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions in O(n log n) via merge sort."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    sorted_p, inversion_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_2 = []
    num_inversions_bf = count_inversions_bf(arr_2)
    _, num_inversions_recursive = count_inversions_recursive(arr_2)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
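    # One more property check, added for illustration: a strictly descending
    # array of length n has n * (n - 1) / 2 inversions.
    desc = [5, 4, 3, 2, 1]
    assert count_inversions_bf(desc) == len(desc) * (len(desc) - 1) // 2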
| 128
|
'''simple docstring'''
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every combination of `word_bank` substrings that builds `target`."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]


if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
| 128
| 1
|
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
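
# For intuition, a simplified sketch of what patch_submodule does (the real
# implementation in datasets.utils.patching wraps intermediate attributes in
# _PatchedModuleObj and restores everything on exit; this outline is an
# assumption, not the actual code):
#
#     class patch_submodule_sketch:
#         def __init__(self, obj, target, new):
#             *self._prefix, self._attr = target.split(".")
#             self._obj, self._new = obj, new
#
#         def start(self):
#             holder = self._obj
#             for name in self._prefix:
#                 holder = getattr(holder, name)
#             self._holder = holder
#             self._original = getattr(holder, self._attr)
#             setattr(holder, self._attr, self._new)
#
#         def stop(self):
#             setattr(self._holder, self._attr, self._original)
#
#         def __enter__(self):
#             self.start()
#
#         def __exit__(self, *exc):
#             self.stop()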
| 710
|
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function` (a sympy expression string) by the
    Newton-Raphson method, starting from `starting_point`."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, variable))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F'{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}',
)
# Find root of cos(x)
print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
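    # One more check, added for illustration: a plain quadratic with the
    # default multiplicity=1; starting from 3, the root found should be ~2.
    print(F'The root of x**2 - 4 = 0 is {newton_raphson("x**2 - 4", 3)}')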
| 668
| 0
|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 375
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : int ="SpeechT5FeatureExtractor"
a : Any ="SpeechT5Tokenizer"
def __init__( self , snake_case__ , snake_case__ ):
"""simple docstring"""
super().__init__(snake_case__ , snake_case__ )
def __call__( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : str = kwargs.pop("audio" , snake_case__ )
lowerCAmelCase : Tuple = kwargs.pop("text" , snake_case__ )
lowerCAmelCase : str = kwargs.pop("text_target" , snake_case__ )
lowerCAmelCase : List[str] = kwargs.pop("audio_target" , snake_case__ )
lowerCAmelCase : Union[str, Any] = kwargs.pop("sampling_rate" , snake_case__ )
if audio is not None and text is not None:
raise ValueError(
"Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?" )
if audio_target is not None and text_target is not None:
raise ValueError(
"Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?" )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process." )
if audio is not None:
lowerCAmelCase : int = self.feature_extractor(snake_case__ , *snake_case__ , sampling_rate=snake_case__ , **snake_case__ )
elif text is not None:
lowerCAmelCase : Optional[int] = self.tokenizer(snake_case__ , **snake_case__ )
else:
lowerCAmelCase : Union[str, Any] = None
if audio_target is not None:
lowerCAmelCase : Optional[Any] = self.feature_extractor(audio_target=snake_case__ , *snake_case__ , sampling_rate=snake_case__ , **snake_case__ )
lowerCAmelCase : Any = targets["input_values"]
elif text_target is not None:
lowerCAmelCase : Tuple = self.tokenizer(snake_case__ , **snake_case__ )
lowerCAmelCase : str = targets["input_ids"]
else:
lowerCAmelCase : str = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase : List[str] = labels
lowerCAmelCase : List[Any] = targets.get("attention_mask" )
if decoder_attention_mask is not None:
lowerCAmelCase : Union[str, Any] = decoder_attention_mask
return inputs
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : int = kwargs.pop("input_values" , snake_case__ )
lowerCAmelCase : List[Any] = kwargs.pop("input_ids" , snake_case__ )
lowerCAmelCase : Dict = kwargs.pop("labels" , snake_case__ )
if input_values is not None and input_ids is not None:
raise ValueError("Cannot process both `input_values` and `input_ids` inputs." )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
"You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded." )
if input_values is not None:
lowerCAmelCase : int = self.feature_extractor.pad(snake_case__ , *snake_case__ , **snake_case__ )
elif input_ids is not None:
lowerCAmelCase : Optional[Any] = self.tokenizer.pad(snake_case__ , **snake_case__ )
else:
lowerCAmelCase : Optional[Any] = None
if labels is not None:
if "input_ids" in labels or (isinstance(snake_case__ , snake_case__ ) and "input_ids" in labels[0]):
lowerCAmelCase : Tuple = self.tokenizer.pad(snake_case__ , **snake_case__ )
lowerCAmelCase : Any = targets["input_ids"]
else:
lowerCAmelCase : List[Any] = self.feature_extractor.feature_size
lowerCAmelCase : Optional[int] = self.feature_extractor.num_mel_bins
lowerCAmelCase : str = self.feature_extractor.pad(snake_case__ , *snake_case__ , **snake_case__ )
lowerCAmelCase : Optional[Any] = feature_size_hack
lowerCAmelCase : Optional[Any] = targets["input_values"]
else:
lowerCAmelCase : List[Any] = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase : int = labels
lowerCAmelCase : Optional[int] = targets.get("attention_mask" )
if decoder_attention_mask is not None:
lowerCAmelCase : List[Any] = decoder_attention_mask
return inputs
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
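
# Usage sketch (argument names assumed from the branches implemented above;
# not an excerpt from the official docs):
#
#     processor = SpeechT5Processor(feature_extractor, tokenizer)
#     # ASR-style: audio in, text labels out
#     inputs = processor(audio=waveform, sampling_rate=16000, text_target="hello world")
#     # TTS-style: text in, audio targets out
#     inputs = processor(text="hello world", audio_target=waveform, sampling_rate=16000)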
| 645
| 0
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 213
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def lowerCamelCase__ ( self : Optional[int] ) -> Union[str, Any]:
__magic_name__: int = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" )
# Using `do_sample=False` to force deterministic output
__magic_name__: Dict = text_generator("""This is a test""" , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] , )
__magic_name__: Dict = text_generator(["""This is a test""", """This is a second test"""] )
self.assertEqual(
__snake_case , [
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] , )
__magic_name__: Optional[Any] = text_generator("""This is a test""" , do_sample=__snake_case , num_return_sequences=2 , return_tensors=__snake_case )
self.assertEqual(
__snake_case , [
{"""generated_token_ids""": ANY(__snake_case )},
{"""generated_token_ids""": ANY(__snake_case )},
] , )
__magic_name__: List[str] = text_generator.model.config.eos_token_id
__magic_name__: Dict = """<pad>"""
__magic_name__: Dict = text_generator(
["""This is a test""", """This is a second test"""] , do_sample=__snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=__snake_case , )
self.assertEqual(
__snake_case , [
[
{"""generated_token_ids""": ANY(__snake_case )},
{"""generated_token_ids""": ANY(__snake_case )},
],
[
{"""generated_token_ids""": ANY(__snake_case )},
{"""generated_token_ids""": ANY(__snake_case )},
],
] , )
@require_tf
def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
__magic_name__: int = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" )
# Using `do_sample=False` to force deterministic output
__magic_name__: Optional[Any] = text_generator("""This is a test""" , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] , )
__magic_name__: Optional[int] = text_generator(["""This is a test""", """This is a second test"""] , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] , )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = "Hello I believe in"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
def lowerCamelCase__ ( self : Any , __snake_case : List[Any] , __snake_case : Union[str, Any] ) -> str:
__magic_name__: Optional[int] = text_generator.model
__magic_name__: Union[str, Any] = text_generator.tokenizer
__magic_name__: Union[str, Any] = text_generator("""This is a test""" )
self.assertEqual(__snake_case , [{"""generated_text""": ANY(__snake_case )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
__magic_name__: str = text_generator("""This is a test""" , return_full_text=__snake_case )
self.assertEqual(__snake_case , [{"""generated_text""": ANY(__snake_case )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
__magic_name__: Optional[int] = pipeline(task="""text-generation""" , model=__snake_case , tokenizer=__snake_case , return_full_text=__snake_case )
__magic_name__: Tuple = text_generator("""This is a test""" )
self.assertEqual(__snake_case , [{"""generated_text""": ANY(__snake_case )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
__magic_name__: Optional[int] = text_generator("""This is a test""" , return_full_text=__snake_case )
self.assertEqual(__snake_case , [{"""generated_text""": ANY(__snake_case )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
__magic_name__: List[str] = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
[{"""generated_text""": ANY(__snake_case )}, {"""generated_text""": ANY(__snake_case )}],
[{"""generated_text""": ANY(__snake_case )}, {"""generated_text""": ANY(__snake_case )}],
] , )
if text_generator.tokenizer.pad_token is not None:
__magic_name__: Union[str, Any] = text_generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
[{"""generated_text""": ANY(__snake_case )}, {"""generated_text""": ANY(__snake_case )}],
[{"""generated_text""": ANY(__snake_case )}, {"""generated_text""": ANY(__snake_case )}],
] , )
with self.assertRaises(__snake_case ):
__magic_name__: Any = text_generator("""test""" , return_full_text=__snake_case , return_text=__snake_case )
with self.assertRaises(__snake_case ):
__magic_name__: List[str] = text_generator("""test""" , return_full_text=__snake_case , return_tensors=__snake_case )
with self.assertRaises(__snake_case ):
__magic_name__: Tuple = text_generator("""test""" , return_text=__snake_case , return_tensors=__snake_case )
# Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
__magic_name__: int = text_generator("""""" )
self.assertEqual(__snake_case , [{"""generated_text""": ANY(__snake_case )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
__magic_name__: Any = text_generator("""""" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 1_0_0_0_0
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("""This is a test""" * 5_0_0 , max_new_tokens=2_0 )
__magic_name__: List[str] = text_generator("""This is a test""" * 5_0_0 , handle_long_generation="""hole""" , max_new_tokens=2_0 )
# Hole strategy cannot work
with self.assertRaises(__snake_case ):
text_generator(
"""This is a test""" * 5_0_0 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 1_0 , )
@require_torch
@require_accelerate
@require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        outputs = pipe("This is a test")
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        outputs = pipe("This is a test")
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        outputs = pipe("This is a test")
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
@require_torch
@require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")
@require_torch
@require_accelerate
@require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_pipeline_length_setting_warning(self):
        prompt = "Hello world"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
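
# Added usage sketch (not part of the original test file): the pipeline exercised above,
# with the tiny model name taken from the tests.
from transformers import pipeline

generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
outputs = generator("Hello I believe in", do_sample=False, max_new_tokens=5)
print(outputs[0]["generated_text"])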
| 213
| 1
|
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
], )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
# fmt: off
UpperCamelCase__ : str = {'''input_ids''': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_, model_name='''microsoft/xprophetnet-large-wiki100-cased''', revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''', )
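
# Added sketch (not part of the original test): the `fairseq_offset` used in the
# expectations above shifts raw SentencePiece ids to leave room for control tokens.
# The offset value below is illustrative, not taken from the real tokenizer.
fairseq_offset = 12                 # hypothetical number of reserved control-token slots
sp_id = 285                         # raw SentencePiece id (e.g. for "▁This")
token_id = sp_id + fairseq_offset   # id the tokenizer actually emits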
| 253
|
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Count the distinct terms generated by a**b for 2 <= a, b <= n."""
    collect_powers = set()

    n = n + 1  # maximum limit (range() is exclusive)

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
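
# Added sanity check (not part of the original file): the Project Euler 29 statement
# says a**b for 2 <= a, b <= 5 yields 15 distinct terms.
assert solution(5) == 15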
| 138
| 0
|
import random
def rabin_miller(num: int) -> bool:
    """Miller-Rabin probabilistic primality test with 5 random witnesses."""
    s = num - 1
    t = 0

    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    """Screen against small primes first, then fall back to Miller-Rabin."""
    if num < 2:
        return False

    low_primes = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """Return a random prime number roughly `keysize` bits in size."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print("Prime number:", num)
    print("is_prime_low_num:", is_prime_low_num(num))
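
# Added usage sketch (not part of the original file): such primes are typically consumed
# in pairs, e.g. for an RSA-style modulus. Illustrative only; real systems should use a
# vetted cryptography library.
p = generate_large_prime(1024)
q = generate_large_prime(1024)
while q == p:  # the two primes must be distinct
    q = generate_large_prime(1024)
modulus = p * q  # roughly 2048-bit modulus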
| 707
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact, following the redirect in the `Location` header."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count the number of occurrences of each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test
def reduce_by_model(logs, error_filter=None):
    """Count the number of occurrences of each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
lowerCAmelCase : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
lowerCAmelCase : Dict =parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowerCAmelCase : Optional[int] =get_job_links(args.workflow_run_id, token=args.token)
lowerCAmelCase : List[Any] ={}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowerCAmelCase : str =k.find(' / ')
lowerCAmelCase : Any =k[index + len(' / ') :]
lowerCAmelCase : str =v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowerCAmelCase : Any =get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowerCAmelCase : List[Any] =get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowerCAmelCase : str =Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowerCAmelCase : int =counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowerCAmelCase : Optional[int] =reduce_by_error(errors)
lowerCAmelCase : Tuple =reduce_by_model(errors)
lowerCAmelCase : Optional[Any] =make_github_table(reduced_by_error)
lowerCAmelCase : Union[str, Any] =make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
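
# Added sketch (not part of the original script): both fetch functions above page through
# the GitHub API 100 items at a time; this is the page arithmetic they rely on.
import math

total_count = 250  # example value of result["total_count"]
extra_pages = math.ceil((total_count - 100) / 100)  # pages beyond the first request -> 2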
| 693
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_fnet"] = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
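
# Added usage sketch (not part of the original __init__): with _LazyModule, submodules
# are imported only on first attribute access, e.g. (assuming FNet is available):
#
#     from transformers.models.fnet import FNetConfig  # materializes configuration_fnet only
#     config = FNetConfig()  # torch-dependent modeling code is still untouched here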
| 23
|
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def lowerCAmelCase_ ( __a , __a="train" ) -> Optional[int]:
"""simple docstring"""
return calculate_hypothesis_value(__a , __a ) - output(
__a , __a )
def lowerCAmelCase_ ( __a ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple =0
for i in range(len(__a ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def lowerCAmelCase_ ( __a , __a ) -> str:
"""simple docstring"""
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def lowerCAmelCase_ ( __a , __a ) -> str:
"""simple docstring"""
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def lowerCAmelCase_ ( __a , __a=m ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple =0
for i in range(__a ):
if index == -1:
summation_value += _error(__a )
else:
summation_value += _error(__a ) * train_data[i][0][index]
return summation_value
def lowerCAmelCase_ ( __a ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str =summation_of_cost_derivative(__a , __a ) / m
return cost_derivative_value
def lowerCAmelCase_ ( ) -> Tuple:
"""simple docstring"""
global parameter_vector
# Tune these values to set a tolerance value for predicted output
SCREAMING_SNAKE_CASE : Tuple =0.000002
SCREAMING_SNAKE_CASE : Optional[Any] =0
SCREAMING_SNAKE_CASE : Tuple =0
while True:
j += 1
SCREAMING_SNAKE_CASE : List[str] =[0, 0, 0, 0]
for i in range(0 , len(__a ) ):
SCREAMING_SNAKE_CASE : Tuple =get_cost_derivative(i - 1 )
SCREAMING_SNAKE_CASE : Tuple =(
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
__a , __a , atol=__a , rtol=__a , ):
break
SCREAMING_SNAKE_CASE : Union[str, Any] =temp_parameter_vector
print(('''Number of iterations:''', j) )
def lowerCAmelCase_ ( ) -> int:
"""simple docstring"""
for i in range(len(__a ) ):
print(('''Actual output value:''', output(__a , '''test''' )) )
print(('''Hypothesis output:''', calculate_hypothesis_value(__a , '''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
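
# Added sketch (not part of the original script): the per-parameter loops above collapse
# into one NumPy update, theta <- theta - lr * X.T @ (X @ theta - y) / m, with a bias
# column prepended to X. The smaller learning rate here is chosen for stability of the
# sketch, not taken from the script.
import numpy as np

X = np.array([[1, 5, 2, 3], [1, 6, 5, 9], [1, 11, 12, 13], [1, 1, 1, 1], [1, 11, 12, 13]], dtype=float)
y = np.array([15, 25, 41, 8, 41], dtype=float)
theta = np.zeros(4)
for _ in range(50_000):
    theta -= 0.001 * X.T @ (X @ theta - y) / len(y)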
| 258
| 0
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class _lowerCamelCase :
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> Union[str, Any]:
pass
def UpperCamelCase_ ( self ) -> Dict:
pass
def UpperCamelCase_ ( self ) -> Tuple:
pass
    def assert_almost_equals(self, a, b, tol):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , **lowerCAmelCase ) -> Dict:
SCREAMING_SNAKE_CASE__: Optional[int]= VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= FlaxVisionTextDualEncoderModel(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= model(input_ids=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , **lowerCAmelCase ) -> Tuple:
SCREAMING_SNAKE_CASE__: Dict= self.get_vision_text_model(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= {'''vision_model''': vision_model, '''text_model''': text_model}
SCREAMING_SNAKE_CASE__: Union[str, Any]= FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= model(input_ids=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , **lowerCAmelCase ) -> Tuple:
SCREAMING_SNAKE_CASE__: List[str]= self.get_vision_text_model(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: str= {'''vision_model''': vision_model, '''text_model''': text_model}
SCREAMING_SNAKE_CASE__: Dict= FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= model(input_ids=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[Any]= output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= model(input_ids=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= after_output[0]
SCREAMING_SNAKE_CASE__: str= np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase , 1e-3 )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , **lowerCAmelCase ) -> Tuple:
SCREAMING_SNAKE_CASE__: str= self.get_vision_text_model(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= {'''vision_model''': vision_model, '''text_model''': text_model}
SCREAMING_SNAKE_CASE__: Tuple= FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= model(
input_ids=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase , output_attentions=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= output.vision_model_output.attentions
self.assertEqual(len(lowerCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
SCREAMING_SNAKE_CASE__: List[str]= output.text_model_output.attentions
self.assertEqual(len(lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[int]:
pt_model.to(lowerCAmelCase )
pt_model.eval()
# prepare inputs
SCREAMING_SNAKE_CASE__: Optional[Any]= inputs_dict
SCREAMING_SNAKE_CASE__: List[str]= {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
SCREAMING_SNAKE_CASE__: Tuple= pt_model(**lowerCAmelCase ).to_tuple()
SCREAMING_SNAKE_CASE__: int= fx_model(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase , from_pt=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= fx_model_loaded(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= VisionTextDualEncoderModel.from_pretrained(lowerCAmelCase , from_flax=lowerCAmelCase )
pt_model_loaded.to(lowerCAmelCase )
pt_model_loaded.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__: Optional[Any]= pt_model_loaded(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCAmelCase , pt_output_loaded.numpy() , 4e-2 )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__: Union[str, Any]= VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= VisionTextDualEncoderModel(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[Any]= FlaxVisionTextDualEncoderModel(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Union[str, Any]= convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= fx_state
self.check_pt_flax_equivalence(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: Dict= VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: str= VisionTextDualEncoderModel(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[Any]= FlaxVisionTextDualEncoderModel(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= load_flax_weights_in_pytorch_model(lowerCAmelCase , fx_model.params )
self.check_pt_flax_equivalence(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: List[str]= self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: Dict= self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: Dict= self.prepare_config_and_inputs()
self.check_save_load(**lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: Tuple= self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCAmelCase )
@is_pt_flax_cross_test
def UpperCamelCase_ ( self ) -> Dict:
SCREAMING_SNAKE_CASE__: Dict= self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__: int= config_inputs_dict.pop('''vision_config''' )
SCREAMING_SNAKE_CASE__: int= config_inputs_dict.pop('''text_config''' )
SCREAMING_SNAKE_CASE__: Optional[Any]= config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
self.check_equivalence_flax_to_pt(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
@slow
def UpperCamelCase_ ( self ) -> str:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.get_pretrained_model_and_inputs()
SCREAMING_SNAKE_CASE__: Tuple= model_a(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= model_a(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= after_outputs[0]
SCREAMING_SNAKE_CASE__: Optional[Any]= np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase , 1e-5 )
@require_flax
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
def UpperCamelCase_ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE__: Any= FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=lowerCAmelCase , text_from_pt=lowerCAmelCase , )
SCREAMING_SNAKE_CASE__: Optional[int]= 13
SCREAMING_SNAKE_CASE__: List[Any]= floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
SCREAMING_SNAKE_CASE__: List[str]= ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
SCREAMING_SNAKE_CASE__: List[str]= random_attention_mask([batch_size, 4] )
SCREAMING_SNAKE_CASE__: str= {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__: str= FlaxViTModel(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Any= FlaxBertModel(lowerCAmelCase )
return vision_model, text_model
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: Dict= FlaxViTModelTester(self )
SCREAMING_SNAKE_CASE__: Optional[Any]= FlaxBertModelTester(self )
SCREAMING_SNAKE_CASE__: Any= vit_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__: int= bert_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__: Optional[int]= vision_config_and_inputs
SCREAMING_SNAKE_CASE__: Tuple= text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
def UpperCamelCase_ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE__: Dict= FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=lowerCAmelCase , text_from_pt=lowerCAmelCase , )
SCREAMING_SNAKE_CASE__: Optional[Any]= 13
SCREAMING_SNAKE_CASE__: Optional[int]= floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
SCREAMING_SNAKE_CASE__: Optional[Any]= ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
SCREAMING_SNAKE_CASE__: Optional[int]= random_attention_mask([batch_size, 4] )
SCREAMING_SNAKE_CASE__: Tuple= {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: List[Any]= FlaxCLIPVisionModel(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= FlaxBertModel(lowerCAmelCase )
return vision_model, text_model
def UpperCamelCase_ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE__: List[Any]= FlaxCLIPVisionModelTester(self )
SCREAMING_SNAKE_CASE__: Optional[Any]= FlaxBertModelTester(self )
SCREAMING_SNAKE_CASE__: Optional[Any]= clip_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__: int= bert_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__: Optional[Any]= vision_config_and_inputs
SCREAMING_SNAKE_CASE__: Any= text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self ) -> str:
SCREAMING_SNAKE_CASE__: Any= FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 )
SCREAMING_SNAKE_CASE__: Tuple= VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
SCREAMING_SNAKE_CASE__: Dict= Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__: Optional[int]= processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__: Any= model(**lowerCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
SCREAMING_SNAKE_CASE__: Optional[int]= np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , lowerCAmelCase , atol=1e-3 ) )
| 718
|
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version identifier ``MAJOR.MINOR.PATCH``."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from the version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Version tuple to str."""
    return ".".join(str(v) for v in version_tuple)
| 107
| 0
|
from math import factorial
def solution(n: int = 20) -> int:
    """Return the middle entry of row 2n of Pascal's triangle, i.e. C(2n, n)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
snake_case : str = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"AI-Sweden/gpt-sw3-126m": 2_048,
"AI-Sweden/gpt-sw3-350m": 2_048,
"AI-Sweden/gpt-sw3-1.6b": 2_048,
"AI-Sweden/gpt-sw3-6.7b": 2_048,
"AI-Sweden/gpt-sw3-20b": 2_048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def preprocess_text(self, text):
        text = self.non_printing_characters_re.sub("", text)
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text, **kwargs):
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string):
        """Returns the input string, this function is overridden to remove the default clean up."""
        return out_string

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def encode_fast(self, text, return_tensors=False):
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)
        return token_ids

    def decode_fast(self, token_ids):
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation):
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
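
# Hypothetical usage sketch (the checkpoint name and example text are assumptions,
# not part of this file): round-tripping text through the fast SentencePiece path.
#
#   tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   ids = tokenizer.encode_fast("Svenska är kul!")
#   text = tokenizer.decode_fast(ids)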
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]
        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")
        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        pass

    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None
        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
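
# Hypothetical usage sketch (the backbone name and flags are assumptions): build a
# backbone from a TimmBackboneConfig and read out the per-stage feature maps.
#
#   config = TimmBackboneConfig(backbone="resnet50", use_pretrained_backbone=False)
#   backbone = TimmBackbone(config)
#   feature_maps = backbone(pixel_values).feature_maps  # tuple of stage outputs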
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}
def get_html_strings() -> list:
    html_string_1 = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
    html_string_2 = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()
        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)
        # fmt: off
        expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
        expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
        # fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)
        # fmt: off
        expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']]
        expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']]
        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
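

# Example of what rename_keys() produces for one fairseq weight name, following the
# replacement rules above (illustration only, not executed by the script):
#
#   rename_keys("transformer.layers.0.linear1.weight")
#   -> "model.decoder.layers.0.fc1.weight"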
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )
    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
lowercase : List[Any] = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head for transformer encoders."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
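
# Minimal usage sketch (shapes are illustrative): the head maps hidden states of
# size embed_size to class logits.
#
#   import torch
#   head = ClassificationHead(class_size=5, embed_size=768)
#   logits = head(torch.randn(2, 768))  # -> shape (2, 5)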
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
"""SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SEWForCTC""",
"""SEWForSequenceClassification""",
"""SEWModel""",
"""SEWPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir=None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path):
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path, force_extract):
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path, force_extract=False):
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path, **kwargs):
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path, output_path):
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers = []

    @staticmethod
    def read_magic_number(path, magic_number_length):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path, magic_number=b""):
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path, **kwargs):
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path, base) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)
        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path, output_path):
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path, output_path):
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)


class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path, magic_number=b""):
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path, output_path):
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()


class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path, output_path):
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID # RAR5_ID

    @staticmethod
    def extract(input_path, output_path):
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path, output_path):
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)


class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path, output_path):
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path, output_path):
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)


class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path, output_path):
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path, magic_number_length):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path, return_extractor=False):
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path):  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(cls, input_path, output_path, extractor_format=None, extractor="deprecated"):
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, BaseExtractor):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
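

# Hypothetical usage sketch (the archive path is an assumption): infer the format
# once, then extract with it.
#
#   fmt = Extractor.infer_extractor_format("/path/to/archive.tar.gz")
#   if fmt:
#       Extractor.extract("/path/to/archive.tar.gz", "/tmp/out", extractor_format=fmt)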
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached


def strtobool(val):
    """Convert a string representation of truth to int 1 (true) or int 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def is_tensor(x):
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True
    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    return False if not is_flax_available() else _is_jax(x)


def to_py_obj(obj):
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    def __post_init__(self):
        class_fields = fields(self)
        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")
        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])
        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        return tuple(self[k] for k in self.keys())
class ExplicitEnum(str, Enum):
    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False


def find_labels(model_class):
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def flatten_dict(d, parent_key="", delimiter="."):
    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
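

# Quick example of flatten_dict() above (illustration only):
#
#   flatten_dict({"a": {"b": 1, "c": {"d": 2}}})
#   -> {"a.b": 1, "a.c.d": 2}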
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir=False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
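

# The helpers above dispatch on the input's framework, so the same call works for
# numpy, torch, tf and jax tensors. A numpy-only sanity sketch:
#
#   x = np.arange(6).reshape(2, 3)
#   transpose(x).shape       # (3, 2)
#   expand_dims(x, 0).shape  # (1, 2, 3)
#   tensor_size(x)           # 6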
def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the repo_id to every entry of a model's auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map
def infer_framework(model_class):
    """
    Infers the framework of a model class by inspecting its MRO. Raising only after the
    whole MRO has been scanned avoids bailing out on unrelated base classes such as `object`.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    raise TypeError(f"Could not infer framework from class {model_class}.")
| 700
|
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
'''A''': '''N''',
'''N''': '''A''',
'''B''': '''O''',
'''O''': '''B''',
'''C''': '''P''',
'''P''': '''C''',
'''D''': '''Q''',
'''Q''': '''D''',
'''E''': '''R''',
'''R''': '''E''',
'''F''': '''S''',
'''S''': '''F''',
'''G''': '''T''',
'''T''': '''G''',
'''H''': '''U''',
'''U''': '''H''',
'''I''': '''V''',
'''V''': '''I''',
'''J''': '''W''',
'''W''': '''J''',
'''K''': '''X''',
'''X''': '''K''',
'''L''': '''Y''',
'''Y''': '''L''',
'''M''': '''Z''',
'''Z''': '''M''',
}
# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(rotpos, rotsel, pb) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Checks the rotor selection, the rotor positions and the plugboard string."""
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    """Creates the symmetric plugboard mapping from a string of letter pairs."""
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")  # strings are immutable, so keep the result

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Create the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
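# For example, _plugboard("POLAND") pairs the letters P<->O, L<->A and N<->D and
# returns {"P": "O", "O": "P", "L": "A", "A": "L", "N": "D", "D": "N"}.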
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Encrypts (and, by symmetry, decrypts) `text` with the given settings."""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # An error could also be raised here for invalid symbols:
        # raise ValueError('Invalid symbol(' + repr(symbol) + ')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] ='''This is my Python script that emulates the Enigma machine from WWII.'''
UpperCAmelCase__ : List[Any] =(1, 1, 1)
UpperCAmelCase__ : Optional[Any] ='''pictures'''
UpperCAmelCase__ : Any =(rotora, rotora, rotora)
UpperCAmelCase__ : str =enigma(message, rotor_pos, rotor_sel, pb)
print('''Encrypted message:''', en)
print('''Decrypted message:''', enigma(en, rotor_pos, rotor_sel, pb))
| 269
| 0
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
a__ = f"{src_lang}-{tgt_lang}"
a__ = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 194
|
from __future__ import annotations
from typing import Any
class Matrix:
    """Simple dense matrix backed by a list of lists."""

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]
    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result
    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row
        assert u.column == v.column == 1  # u, v should be column vectors

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
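# Note on sherman_morrison: `self` is assumed to already hold A^(-1); the method
# applies the Sherman-Morrison identity
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
# so only matrix products and a single scalar division are required.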
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
| 194
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
"""conversational""": TFBlenderbotForConditionalGeneration,
"""feature-extraction""": TFBlenderbotModel,
"""summarization""": TFBlenderbotForConditionalGeneration,
"""text2text-generation""": TFBlenderbotForConditionalGeneration,
"""translation""": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 704
|
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def get_module_path(test_file):
    """Turn `tests/models/xxx/test_modeling_xxx.py` into an importable module path."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Import and return the test module for `test_file`."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes(test_file):
    """Collect all `*ModelTester` classes defined in a model test file."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Collect all test classes (those defining a non-empty `all_model_classes`)."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(test_class)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Collect all model classes covered by the test classes in a test file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    """Instantiate a test class and return the class of its `model_tester`, if any."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Return the test classes in `test_file` that cover `model_class`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Return the tester classes in `test_file` that cover `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping(test_file):
    """Map each model class to the test classes that cover it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class to the tester classes that cover it."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the (nested) class-valued mappings above JSON-serializable."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
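# A minimal usage sketch (hypothetical test-file path; run from the repo root so
# the `tests.models.*` import resolves):
#
#     test_file = "tests/models/bert/test_modeling_bert.py"
#     print(to_json(get_model_to_tester_mapping(test_file)))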
| 521
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot_small""": [
"""BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotSmallConfig""",
"""BlenderbotSmallOnnxConfig""",
],
"""tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""BlenderbotSmallTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"""BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotSmallForCausalLM""",
"""BlenderbotSmallForConditionalGeneration""",
"""BlenderbotSmallModel""",
"""BlenderbotSmallPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"""TFBlenderbotSmallForConditionalGeneration""",
"""TFBlenderbotSmallModel""",
"""TFBlenderbotSmallPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"""FlaxBlenderbotSmallForConditionalGeneration""",
"""FlaxBlenderbotSmallModel""",
"""FlaxBlenderbotSmallPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 474
|
'''simple docstring'''
def and_gate(input_1: int, input_2: int) -> int:
    """Calculate the AND of two binary input values (1 only if both are 1)."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Exhaustively tests and_gate against the AND truth table."""
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 474
| 1
|
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Finds the Jaccard similarity between two sets, or between two lists/tuples.
    With `alternative_union=True` the denominator is |A| + |B| instead of |A U B|.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
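    # Expected output: 0.375 -- the sets share {"c", "d", "e"} (3 elements)
    # out of a union of 8 elements.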
| 706
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Downloads the original checkpoints if needed and converts them to the HF Jukebox layout."""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 658
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 332
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    r"""Configuration class for the ViT MSN model."""

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
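# A minimal usage sketch -- instantiating the config with one overridden field:
#
#     config = ViTMSNConfig(image_size=384)
#     assert config.num_hidden_layers == 12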
| 332
| 1
|
'''simple docstring'''
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = R'\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n'


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length, max_position_embeddings=None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done
class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length, max_new_tokens):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time, initial_timestamp=None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self):
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None
def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
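# A minimal usage sketch (assumes `input_ids`/`scores` tensors from a generation loop):
#
#     criteria = StoppingCriteriaList(
#         [MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)]
#     )
#     done = criteria(input_ids, scores)  # True once either limit is hit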
| 717
|
from ..utils import DummyObject, requires_backends
# The concrete class name was lost in this dump; `TorchScipyDummyObject` is a
# placeholder, but the body follows the standard transformers dummy-object template.
class TorchScipyDummyObject(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 107
| 0
|
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
| 79
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spm_char.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'microsoft/speecht5_asr': 10_24,
'microsoft/speecht5_tts': 10_24,
'microsoft/speecht5_vc': 10_24,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    """Constructs a SpeechT5 tokenizer based on a character-level SentencePiece model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
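# A minimal usage sketch (requires the `spm_char.model` file, e.g. from the
# microsoft/speecht5_tts checkpoint on the Hub):
#
#     tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
#     ids = tok("hello world").input_ids   # char-level pieces + </s>
#     print(tok.decode(ids, skip_special_tokens=True))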
| 523
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" BART tokenizer, backed by HuggingFace's tokenizers library."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BartTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs) -> None:
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value) -> None:
        # Mask tokens behave like normal words, i.e. include the space before them
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
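# A self-contained sketch (added for illustration, not part of the original
# file) of the BART special-token layout produced by
# `build_inputs_with_special_tokens` above; BOS/EOS values are assumed
# placeholder ids, not looked up from a real vocabulary.
_DEMO_BOS, _DEMO_EOS = 0, 2
def _demo_bart_inputs(ids_0, ids_1=None):
    output = [_DEMO_BOS] + ids_0 + [_DEMO_EOS]
    if ids_1 is None:
        return output
    return output + [_DEMO_EOS] + ids_1 + [_DEMO_EOS]
# e.g. _demo_bart_inputs([10, 11]) == [0, 10, 11, 2]
#      _demo_bart_inputs([10], [11]) == [0, 10, 2, 2, 11, 2]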
| 709
|
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class AlignTextConfig(PretrainedConfig):
    r"""Configuration of the text encoder of an ALIGN model."""
    model_type = "align_text_model"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    r"""Configuration of the (EfficientNet-style) vision encoder of an ALIGN model."""
    model_type = "align_vision_model"

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes=[3, 3, 5, 3, 5, 5, 3], in_channels=[32, 16, 24, 40, 80, 112, 192], out_channels=[16, 24, 40, 80, 112, 192, 320], depthwise_padding=[], strides=[1, 2, 2, 2, 1, 2, 1], num_block_repeats=[1, 2, 2, 3, 3, 4, 1], expand_ratios=[1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, drop_connect_rate: float = 0.2, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    r"""Composite configuration grouping an AlignTextConfig and an AlignVisionConfig."""
    model_type = "align"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")
        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        """Instantiate an AlignConfig from a text config and a vision config."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
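# A brief usage sketch (added for illustration; not part of the original file):
# build an AlignConfig from explicitly constructed sub-configs, as supported by
# `from_text_vision_configs` above, and round-trip it through `to_dict`.
if __name__ == "__main__":
    text_cfg = AlignTextConfig(hidden_size=768)
    vision_cfg = AlignVisionConfig(hidden_dim=2560)
    cfg = AlignConfig.from_text_vision_configs(text_cfg, vision_cfg)
    assert cfg.to_dict()["model_type"] == "align"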
| 381
| 0
|
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"
SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility
MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
| 81
|
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find root of `function` in the complex plane using Newton-Raphson;
    `multiplicity` restores fast convergence on repeated roots."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError('Could not find root') from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(F'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}''')
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
F'''{newton_raphson('log(y) - 1', 2, variable='y')}''',
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
F'''{newton_raphson('exp(x) - 1', 10, precision=0.005)}''',
)
# Find root of cos(x)
print(F'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
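    # An added example (not in the original script): find the square root of 2
    # as the positive root of x**2 - 2 = 0.
    print(F'''The root of x**2 - 2 = 0 is {newton_raphson('x**2 - 2', 1)}''')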
| 549
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow LXMERT checkpoint into a PyTorch model file."""
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
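# Example invocation (added for illustration; all paths are placeholders, not
# part of the original script):
#
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt \
#       --config_file ./lxmert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin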
| 713
|
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 536
| 0
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory,
            streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
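# A short usage sketch (added for illustration; "data.txt" is a placeholder
# path, not part of the original file): read a plain-text file into a
# datasets.Dataset, one example per line.
#
#     reader = TextDatasetReader("data.txt", keep_in_memory=True)
#     ds = reader.read()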
| 472
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk `key` through the HF model and copy `value` into the matching parameter."""
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Map every fairseq weight onto the HF SEW model, tracking anything unused."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group',
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "weight" in name:
                        weight_type = 'weight'
                    elif "bias" in name:
                        weight_type = 'bias'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy a single fairseq feature-extractor conv/norm weight into the HF model."""
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    """Derive a SEWConfig from the fairseq model configuration."""
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = 'gelu'
    config.feat_extract_norm = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group'
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = 'Wav2Vec2FeatureExtractor'
    config.tokenizer_class = 'Wav2Vec2CTCTokenizer'
    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Copy/paste/tweak the fairseq SEW weights into the transformers design."""
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()
    return_attention_mask = True if config.feat_extract_norm == 'layer' else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True,
        return_attention_mask=return_attention_mask,
    )
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='|',
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    recursively_load_weights(model, hf_model, is_finetuned)
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
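# Example invocation (added for illustration; all paths are placeholders, not
# part of the original script):
#
#   python convert_sew_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./sew.pt --pytorch_dump_folder_path ./sew-hf \
#       --dict_path ./dict.ltr.txt --is_finetuned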
| 472
| 1
|
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(R"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(R"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(R"^\s*try:")
# Catches a line with else:
_re_else = re.compile(R"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Read an init file and parse (per backend) the `_import_structure` objects and the TYPE_CHECKING objects defined."""
    with open(init_file, '''r''', encoding='''utf-8''', newline='''\n''') as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith('''_import_structure = {'''):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('''if TYPE_CHECKING''') and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(R'''\[([^\]]+)\]''', content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(''', ''')])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''') if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(''' ''' * 8 + '''"'''):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {'''none''': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('''if TYPE_CHECKING'''):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(''' ''' * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(''', ''')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(''', ''')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(''' ''' * 8 + '''"'''):
                    objects.append(line[9:-3])
                elif line.startswith(''' ''' * 12 + '''"'''):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith('''else''')
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(''', '''))
        elif line.startswith(''' ''' * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {'''none''': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(''' ''' * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', '''))
                elif line.startswith(''' ''' * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init and return a list of mismatch errors."""
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''')
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''')
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = '''base imports''' if key == '''none''' else f'''{key} backend'''
            errors.append(f'''Differences for {name}:''')
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f'''  {a} in TYPE_HINT but not in _import_structure.''')
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f'''  {a} in _import_structure but not in TYPE_HINT.''')
    return errors
def check_all_inits():
    """Check all inits in the repo and raise if one does not define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, '''__init__.py''')
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append('''\n'''.join(errors))
    if len(failures) > 0:
        raise ValueError('''\n\n'''.join(failures))
def get_transformers_submodules():
    """Return the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('''_'''):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob('''*.py'''))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, '''.''')
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace('''.py''', '''''').replace(os.path.sep, '''.''')
            if len(submodule.split('''.''')) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    """convert_pytorch_checkpoint_to_tf2""",
    """modeling_flax_pytorch_utils""",
    """models.esm.openfold_utils""",
]
def check_submodules():
    """Check that every submodule is registered in the main init's `_import_structure`."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''), '''r''') as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''', init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = '''\n'''.join(f'''- {module}''' for module in module_not_registered)
        raise ValueError(
            '''The following submodules are not properly registered in the main init of Transformers:\n'''
            f'''{list_of_modules}\n'''
            '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''')
check_submodules()
| 714
|
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowercase : str = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use CLIPImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
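# A short sketch (added for illustration, not part of the original file) of the
# deprecation behaviour above: instantiating the class emits a FutureWarning.
#
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         _ = CLIPFeatureExtractor()
#         assert any(issubclass(w.category, FutureWarning) for w in caught)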
| 105
| 0
|
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"""a""": 1, """b""": 2}
        s6 = {"""a""": [1, 2], """b""": [3, 4]}
        s7 = {"""a""": {"""1""": 1}, """b""": 2}
        s8 = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"""a""": 2, """b""": 3}
        expected_map_nested_s6 = {"""a""": [2, 3], """b""": [4, 5]}
        expected_map_nested_s7 = {"""a""": {"""1""": 2}, """b""": 3}
        expected_map_nested_s8 = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)
        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)
        sn1 = {"""a""": np.eye(2), """b""": np.zeros(3), """c""": np.ones(2)}
        expected_map_nested_sn1_sum = {"""a""": 2, """b""": 0, """c""": 2}
        expected_map_nested_sn1_int = {
            """a""": np.eye(2).astype(int),
            """b""": np.zeros(3).astype(int),
            """c""": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True).items()}, {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()}, )
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False, num_proc=num_proc), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True, num_proc=num_proc).items()}, {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()}, )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn1, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"""a""": 1, """b""": 2}
        d2 = {"""a""": 3, """b""": 4}
        d3 = {"""a""": 5, """b""": 6}
        expected_zip_dict_result = sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)
    def test_temporary_assignment(self):
        class Foo:
            my_attr = 'bar'

        foo = Foo()
        self.assertEqual(foo.my_attr, """bar""")
        with temporary_assignment(foo, """my_attr""", """BAR"""):
            self.assertEqual(foo.my_attr, """BAR""")
        self.assertEqual(foo.my_attr, """bar""")
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("""datasets.utils.py_utils._single_map_nested""") as mock_single_map_nested, patch(
        """datasets.parallel.parallel.Pool""") as mock_multiprocessing_pool:
        data_struct = {f'{i}': i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tf(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_temp_seed_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("""input_data""" , [{}] )
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="""foobar""")
    expected_output = {"""x""": 1, """y""": """foobar"""}
    assert asdict(input) == expected_output

    input = {"""a""": {"""b""": A(x=10, y="""foo""")}, """c""": [A(x=20, y="""bar""")]}
    expected_output = {"""a""": {"""b""": {"""x""": 10, """y""": """foo"""}}, """c""": [{"""x""": 20, """y""": """bar"""}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="""foo""")])
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"""text""": """hello there"""}] * 10))
        assert out.count("""hello""") == 10
        assert out.count("""there""") == 10
        assert len(out) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"""text""": """hello there"""}] * 10))
        assert out.count("""hello""") == 10
        assert out.count("""there""") == 10
        assert len(out) == 20
    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"""content""": """a"""}, {"""content""": """b"""}]):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("""a""") == 2
        assert out.count("""b""") == 2
        assert len(out) == 4
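# A minimal runnable illustration (added; not part of the original test file)
# of the `map_nested` semantics exercised above: the callable is applied to
# every leaf of an arbitrarily nested structure.
if __name__ == "__main__":
    assert map_nested(add_one, {"a": [1, 2], "b": 3}) == {"a": [2, 3], "b": 4}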
| 532
|
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE__ = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/led-base-16384": 16_384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
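# A minimal usage sketch (not part of the original module); the checkpoint name
# comes from the pretrained map above, the inputs are illustrative:
#
#     tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#     enc = tokenizer(["a long document ..."], padding="max_length", max_length=32)
#     # if `enc` carried a "global_attention_mask", `_pad` above extends it with -1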
| 532
| 1
|
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """
    Greedy solution to the fractional knapsack problem: take items in
    decreasing value/weight ratio, splitting the last item if needed.

    >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
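# A quick instantiation sketch (not part of the original file); the values are
# the defaults defined above:
#
#     config = DonutSwinConfig()
#     config.hidden_size  # 768 == 96 * 2 ** (4 - 1), the last-stage channel width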
| 598
| 0
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt, train_usr, train_mtch, test_dt, test_mtch):
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user, train_match, test_match):
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    # disp=False assumed (suppress optimizer output); the literal was lost in this copy
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train, x_test, train_user):
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user):
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote, actual_result):
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe


if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data against the held-out last value
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
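    # A toy check of the voting logic (illustrative numbers, not from the dataset):
    # data_safety_checker([0.5, 0.45, 2.0], 0.5) is True, because two of the three
    # forecasts land within 0.1 of the actual value and only one overshoots it.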
| 415
|
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """
    Count the words in words.txt whose alphabetical value (A=1, ..., Z=26)
    is a triangular number (Project Euler problem 42).
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
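# Worked example of the scoring rule above: "SKY" -> 19 + 11 + 25 = 55, and
# 55 = 0.5 * 10 * 11 is the 10th triangular number, so "SKY" is counted.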
| 367
| 0
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : List[str] =(UniPCMultistepScheduler,)
lowercase_ : Tuple =(('''num_inference_steps''', 25),)
def A__ ( self ,**A__):
lowercase = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
config.update(**A__)
return config
def A__ ( self ,A__=0 ,**A__):
lowercase = dict(self.forward_default_kwargs)
lowercase = kwargs.pop('''num_inference_steps''' ,A__)
lowercase = self.dummy_sample
lowercase = 0.1 * sample
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
scheduler.set_timesteps(A__)
# copy over dummy past residuals
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A__)
lowercase = scheduler_class.from_pretrained(A__)
new_scheduler.set_timesteps(A__)
# copy over dummy past residuals
lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase , lowercase = sample, sample
for t in range(A__ ,time_step + scheduler.config.solver_order + 1):
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def A__ ( self ,A__=0 ,**A__):
lowercase = dict(self.forward_default_kwargs)
lowercase = kwargs.pop('''num_inference_steps''' ,A__)
lowercase = self.dummy_sample
lowercase = 0.1 * sample
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**A__)
scheduler.set_timesteps(A__)
# copy over dummy past residuals (must be after setting timesteps)
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A__)
lowercase = scheduler_class.from_pretrained(A__)
# copy over dummy past residuals
new_scheduler.set_timesteps(A__)
# copy over dummy past residual (must be after setting timesteps)
lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def A__ ( self ,A__=None ,**A__):
if scheduler is None:
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
lowercase = 1_0
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter
scheduler.set_timesteps(A__)
for i, t in enumerate(scheduler.timesteps):
lowercase = model(A__ ,A__)
lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
return sample
def A__ ( self):
lowercase = dict(self.forward_default_kwargs)
lowercase = kwargs.pop('''num_inference_steps''' ,A__)
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**A__)
lowercase = self.dummy_sample
lowercase = 0.1 * sample
if num_inference_steps is not None and hasattr(A__ ,'''set_timesteps'''):
scheduler.set_timesteps(A__)
elif num_inference_steps is not None and not hasattr(A__ ,'''set_timesteps'''):
lowercase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
lowercase = scheduler.timesteps[5]
lowercase = scheduler.timesteps[6]
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
self.assertEqual(output_a.shape ,sample.shape)
self.assertEqual(output_a.shape ,output_a.shape)
def A__ ( self):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
lowercase = UniPCMultistepScheduler(**self.get_scheduler_config())
lowercase = self.full_loop(scheduler=A__)
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.2464) < 1E-3
lowercase = DPMSolverSinglestepScheduler.from_config(scheduler.config)
lowercase = DEISMultistepScheduler.from_config(scheduler.config)
lowercase = DPMSolverMultistepScheduler.from_config(scheduler.config)
lowercase = UniPCMultistepScheduler.from_config(scheduler.config)
lowercase = self.full_loop(scheduler=A__)
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.2464) < 1E-3
def A__ ( self):
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=A__)
def A__ ( self):
self.check_over_configs(thresholding=A__)
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=A__ ,prediction_type=A__ ,sample_max_value=A__ ,solver_order=A__ ,solver_type=A__ ,)
def A__ ( self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A__)
def A__ ( self):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,)
lowercase = self.full_loop(
solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,)
assert not torch.isnan(A__).any(), "Samples have nan numbers"
def A__ ( self):
self.check_over_configs(lower_order_final=A__)
self.check_over_configs(lower_order_final=A__)
def A__ ( self):
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=A__ ,time_step=0)
def A__ ( self):
lowercase = self.full_loop()
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.2464) < 1E-3
def A__ ( self):
lowercase = self.full_loop(prediction_type='''v_prediction''')
lowercase = torch.mean(torch.abs(A__))
assert abs(result_mean.item() - 0.1014) < 1E-3
def A__ ( self):
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(thresholding=A__ ,dynamic_thresholding_ratio=0)
lowercase = scheduler_class(**A__)
lowercase = 1_0
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter.half()
scheduler.set_timesteps(A__)
for i, t in enumerate(scheduler.timesteps):
lowercase = model(A__ ,A__)
lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
        assert sample.dtype == torch.float16
def A__ ( self ,**A__):
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config(**A__)
lowercase = scheduler_class(**A__)
scheduler.set_timesteps(scheduler.config.num_train_timesteps)
assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 633
|
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """
    Takes a list of possible side lengths and determines whether a
    two-dimensional polygon with such side lengths can exist.

    >>> check_polygon([6, 10, 5])
    True
    >>> check_polygon([3, 7, 13, 2])
    False
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 633
| 1
|
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # fill the single <mask> slot and return the topk (sentence, score, token) triples
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 58
|
def binary_recursive(decimal: int) -> str:
    """
    Convert a non-negative integer to its binary digits, recursively.

    >>> binary_recursive(1000)
    '1111101000'
    """
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """
    Validate the input and prefix the result with '0b' (and a sign if negative).

    >>> main("-13")
    '-0b1101'
    """
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
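# Example: main("-13") returns "-0b1101", since 13 is 0b1101 and the sign is
# re-attached after the recursive conversion of the absolute value.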
| 485
| 0
|
'''simple docstring'''
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """
    Return the midpoint of two points.

    >>> get_mid((0, 0), (2, 2))
    (1.0, 1.0)
    """
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Draw the outline, then recurse into the three half-scale corner triangles."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    # each child triangle keeps one vertex and takes the midpoints of its two adjacent sides
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor('red')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 570
|
'''simple docstring'''
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
| 570
| 1
|
def solution(length: int = 50) -> int:
    """
    For each tile size (2, 3 and 4), count the ways to replace some of the
    unit tiles in a row of the given length with tiles of that single size,
    then return the total over the three sizes (Project Euler problem 116).
    """
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])
if __name__ == "__main__":
print(F'''{solution() = }''')
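# Sanity check from the Project Euler 116 statement: a row of five units admits
# 7 length-2, 3 length-3 and 2 length-4 tilings, so solution(5) == 12.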
| 176
|
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
__magic_name__: List[Any] = False
class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,  # value assumed; it does not affect add_noise, which is all this test uses
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,  # value assumed, as above
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
| 324
| 0
|
"""simple docstring"""
def get_highest_set_bit_position(number: int) -> int:
    """
    Return the 1-indexed position of the most significant set bit
    (0 for an input of 0).

    >>> get_highest_set_bit_position(25)
    5
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self :Union[str, Any] , __lowercase :RegNetConfig , **__lowercase :Any ):
super().__init__(**__lowercase )
__lowerCamelCase : Tuple =config.num_channels
__lowerCamelCase : Union[str, Any] =TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def __lowercase ( self :int , __lowercase :List[str] ):
__lowerCamelCase : int =shape_list(__lowercase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__lowerCamelCase : Union[str, Any] =tf.transpose(__lowercase , perm=(0, 2, 3, 1) )
__lowerCamelCase : Optional[int] =self.embedder(__lowercase )
return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self :List[Any] , __lowercase :int , __lowercase :int = 2 , **__lowercase :Optional[int] ):
super().__init__(**__lowercase )
__lowerCamelCase : int =tf.keras.layers.ConvaD(
filters=__lowercase , kernel_size=1 , strides=__lowercase , use_bias=__lowercase , name='''convolution''' )
__lowerCamelCase : List[str] =tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
def __lowercase ( self :Optional[Any] , __lowercase :tf.Tensor , __lowercase :bool = False ):
return self.normalization(self.convolution(__lowercase ) , training=__lowercase )
class TFRegNetSELayer(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self :Dict , __lowercase :int , __lowercase :int , **__lowercase :List[str] ):
super().__init__(**__lowercase )
__lowerCamelCase : int =tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowercase , name='''pooler''' )
__lowerCamelCase : int =[
tf.keras.layers.ConvaD(filters=__lowercase , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=__lowercase , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def __lowercase ( self :Dict , __lowercase :Union[str, Any] ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
__lowerCamelCase : Any =self.pooler(__lowercase )
for layer_module in self.attention:
__lowerCamelCase : Any =layer_module(__lowercase )
__lowerCamelCase : Dict =hidden_state * pooled
return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self :Optional[int] , __lowercase :RegNetConfig , __lowercase :int , __lowercase :int , __lowercase :int = 1 , **__lowercase :str ):
super().__init__(**__lowercase )
__lowerCamelCase : Dict =in_channels != out_channels or stride != 1
__lowerCamelCase : int =max(1 , out_channels // config.groups_width )
__lowerCamelCase : List[str] =(
TFRegNetShortCut(__lowercase , stride=__lowercase , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__lowerCamelCase : str =[
TFRegNetConvLayer(__lowercase , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
__lowercase , stride=__lowercase , groups=__lowercase , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(__lowercase , kernel_size=1 , activation=__lowercase , name='''layer.2''' ),
]
        self.activation = ACT2FN[config.hidden_act]
def __lowercase ( self :int , __lowercase :Optional[int] ):
__lowerCamelCase : List[Any] =hidden_state
for layer_module in self.layers:
__lowerCamelCase : str =layer_module(__lowercase )
__lowerCamelCase : List[Any] =self.shortcut(__lowercase )
hidden_state += residual
__lowerCamelCase : Optional[int] =self.activation(__lowercase )
return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self :Union[str, Any] , __lowercase :RegNetConfig , __lowercase :int , __lowercase :int , __lowercase :int = 1 , **__lowercase :List[str] ):
super().__init__(**__lowercase )
__lowerCamelCase : Optional[Any] =in_channels != out_channels or stride != 1
__lowerCamelCase : Optional[Any] =max(1 , out_channels // config.groups_width )
__lowerCamelCase : Dict =(
TFRegNetShortCut(__lowercase , stride=__lowercase , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
__lowerCamelCase : Union[str, Any] =[
TFRegNetConvLayer(__lowercase , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
__lowercase , stride=__lowercase , groups=__lowercase , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(__lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(__lowercase , kernel_size=1 , activation=__lowercase , name='''layer.3''' ),
]
        self.activation = ACT2FN[config.hidden_act]
def __lowercase ( self :Tuple , __lowercase :Tuple ):
__lowerCamelCase : List[Any] =hidden_state
for layer_module in self.layers:
__lowerCamelCase : int =layer_module(__lowercase )
__lowerCamelCase : List[str] =self.shortcut(__lowercase )
hidden_state += residual
__lowerCamelCase : List[str] =self.activation(__lowercase )
return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self :int , __lowercase :RegNetConfig , __lowercase :int , __lowercase :int , __lowercase :int = 2 , __lowercase :int = 2 , **__lowercase :Union[str, Any] ):
super().__init__(**__lowercase )
__lowerCamelCase : List[str] =TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
__lowerCamelCase : List[Any] =[
# downsampling is done in the first layer with stride of 2
layer(__lowercase , __lowercase , __lowercase , stride=__lowercase , name='''layers.0''' ),
*[layer(__lowercase , __lowercase , __lowercase , name=f'layers.{i+1}' ) for i in range(depth - 1 )],
]
def __lowercase ( self :int , __lowercase :List[str] ):
for layer_module in self.layers:
__lowerCamelCase : int =layer_module(__lowercase )
return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self :List[Any] , __lowercase :RegNetConfig , **__lowercase :List[str] ):
super().__init__(**__lowercase )
__lowerCamelCase : Optional[int] =[]
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
__lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
__lowerCamelCase : Any =zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(__lowercase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(__lowercase , __lowercase , __lowercase , depth=__lowercase , name=f'stages.{i+1}' ) )
def __lowercase ( self :str , __lowercase :tf.Tensor , __lowercase :bool = False , __lowercase :bool = True ):
__lowerCamelCase : Optional[Any] =() if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__lowerCamelCase : Dict =hidden_states + (hidden_state,)
__lowerCamelCase : List[Any] =stage_module(__lowercase )
if output_hidden_states:
__lowerCamelCase : Union[str, Any] =hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=__lowercase , hidden_states=__lowercase )
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
"""simple docstring"""
    config_class = RegNetConfig
def __init__( self :List[Any] , __lowercase :Dict , **__lowercase :Union[str, Any] ):
super().__init__(**__lowercase )
__lowerCamelCase : int =config
__lowerCamelCase : List[str] =TFRegNetEmbeddings(__lowercase , name='''embedder''' )
__lowerCamelCase : List[str] =TFRegNetEncoder(__lowercase , name='''encoder''' )
__lowerCamelCase : List[Any] =tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowercase , name='''pooler''' )
@unpack_inputs
def __lowercase ( self :List[Any] , __lowercase :tf.Tensor , __lowercase :Optional[bool] = None , __lowercase :Optional[bool] = None , __lowercase :bool = False , ):
__lowerCamelCase : Union[str, Any] =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCamelCase : Tuple =return_dict if return_dict is not None else self.config.use_return_dict
__lowerCamelCase : Tuple =self.embedder(__lowercase , training=__lowercase )
__lowerCamelCase : Optional[Any] =self.encoder(
__lowercase , output_hidden_states=__lowercase , return_dict=__lowercase , training=__lowercase )
__lowerCamelCase : str =encoder_outputs[0]
__lowerCamelCase : Tuple =self.pooler(__lowercase )
# Change to NCHW output format have uniformity in the modules
__lowerCamelCase : int =tf.transpose(__lowercase , perm=(0, 3, 1, 2) )
__lowerCamelCase : Any =tf.transpose(__lowercase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__lowerCamelCase : str =tuple([tf.transpose(__lowercase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__lowercase , pooler_output=__lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
"""simple docstring"""
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
@property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
_UpperCamelCase = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
_UpperCamelCase = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , snake_case__ , )
class TFRegNetModel(TFRegNetPreTrainedModel):
"""simple docstring"""
def __init__( self :Union[str, Any] , __lowercase :RegNetConfig , *__lowercase :List[str] , **__lowercase :int ):
super().__init__(__lowercase , *__lowercase , **__lowercase )
__lowerCamelCase : Tuple =TFRegNetMainLayer(__lowercase , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(__lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowercase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __lowercase ( self :Optional[Any] , __lowercase :tf.Tensor , __lowercase :Optional[bool] = None , __lowercase :Optional[bool] = None , __lowercase :Optional[int]=False , ):
__lowerCamelCase : List[Any] =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCamelCase : Optional[int] =return_dict if return_dict is not None else self.config.use_return_dict
__lowerCamelCase : Dict =self.regnet(
pixel_values=__lowercase , output_hidden_states=__lowercase , return_dict=__lowercase , training=__lowercase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , snake_case__ , )
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
"""simple docstring"""
def __init__( self :Union[str, Any] , __lowercase :RegNetConfig , *__lowercase :List[Any] , **__lowercase :Dict ):
super().__init__(__lowercase , *__lowercase , **__lowercase )
__lowerCamelCase : Optional[int] =config.num_labels
__lowerCamelCase : Optional[int] =TFRegNetMainLayer(__lowercase , name='''regnet''' )
# classification head
__lowerCamelCase : Union[str, Any] =[
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(__lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __lowercase ( self :List[Any] , __lowercase :tf.Tensor = None , __lowercase :tf.Tensor = None , __lowercase :bool = None , __lowercase :bool = None , __lowercase :int=False , ):
__lowerCamelCase : str =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCamelCase : Optional[int] =return_dict if return_dict is not None else self.config.use_return_dict
__lowerCamelCase : str =self.regnet(
__lowercase , output_hidden_states=__lowercase , return_dict=__lowercase , training=__lowercase )
__lowerCamelCase : Any =outputs.pooler_output if return_dict else outputs[1]
__lowerCamelCase : List[str] =self.classifier[0](__lowercase )
__lowerCamelCase : str =self.classifier[1](__lowercase )
__lowerCamelCase : str =None if labels is None else self.hf_compute_loss(labels=__lowercase , logits=__lowercase )
if not return_dict:
__lowerCamelCase : Optional[int] =(logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__lowercase , logits=__lowercase , hidden_states=outputs.hidden_states )
| 363
| 0
|
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")
if __name__ == "__main__":
main()
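# A minimal read-back sketch (hypothetical helper, not part of the original
# module): each key file holds "key_size,n,exponent" as written above.
#
#     def read_key_file(path: str) -> tuple[int, int, int]:
#         key_size, n, exponent = open(path).read().split(",")
#         return int(key_size), int(n), int(exponent)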
| 46
|
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
a__ = DanceDiffusionPipeline
a__ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
a__ = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
a__ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
a__ = False
a__ = False
def lowerCAmelCase_ (self ) -> Dict:
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16_000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=lowercase__ , use_timestep_embedding=lowercase__ , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
__UpperCAmelCase = IPNDMScheduler()
__UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def lowerCAmelCase_ (self , lowercase__ , lowercase__=0 ) -> Dict:
if str(lowercase__ ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(lowercase__ )
else:
__UpperCAmelCase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
__UpperCAmelCase = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 4,
}
return inputs
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = DanceDiffusionPipeline(**lowercase__ )
__UpperCAmelCase = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
__UpperCAmelCase = self.get_dummy_inputs(lowercase__ )
__UpperCAmelCase = pipe(**lowercase__ )
__UpperCAmelCase = output.audios
__UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
__UpperCAmelCase = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def lowerCAmelCase_ (self ) -> Union[str, Any]:
return super().test_save_load_local()
@skip_mps
def lowerCAmelCase_ (self ) -> List[str]:
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def lowerCAmelCase_ (self ) -> Optional[int]:
return super().test_save_load_optional_components()
@skip_mps
def lowerCAmelCase_ (self ) -> Any:
return super().test_attention_slicing_forward_pass()
def lowerCAmelCase_ (self ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ (self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
__UpperCAmelCase = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(generator=lowercase__ , num_inference_steps=100 , audio_length_in_s=4.096 )
__UpperCAmelCase = output.audios
__UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
__UpperCAmelCase = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
__UpperCAmelCase = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(generator=lowercase__ , num_inference_steps=100 , audio_length_in_s=4.096 )
__UpperCAmelCase = output.audios
__UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
__UpperCAmelCase = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 303
| 0
|
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=13 , lowercase=30 , lowercase=2 , lowercase=3 , lowercase=True , lowercase=True , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=10 , lowercase=0.02 , lowercase=3 , lowercase=None , lowercase=2 , ):
A_ : Optional[Any] = parent
A_ : List[Any] = batch_size
A_ : Any = image_size
A_ : List[str] = patch_size
A_ : Dict = num_channels
A_ : Any = is_training
A_ : int = use_labels
A_ : int = hidden_size
A_ : int = num_hidden_layers
A_ : int = num_attention_heads
A_ : Tuple = intermediate_size
A_ : Any = hidden_act
A_ : Dict = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : List[Any] = type_sequence_label_size
A_ : Any = initializer_range
A_ : List[Any] = scope
A_ : Any = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
A_ : int = (image_size // patch_size) ** 2
A_ : Tuple = num_patches + 2
def _a (self ):
A_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Any = None
if self.use_labels:
A_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Tuple = self.get_config()
return config, pixel_values, labels
def _a (self ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _a (self , lowercase , lowercase , lowercase ):
A_ : List[str] = DeiTModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
A_ : List[Any] = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the PyTorch warning "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}")

                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 702
|
'''simple docstring'''
import unittest
from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
| 686
| 0
|
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 36
|
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
    args = parser.parse_args()

    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
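    # Example invocation (script name and output path are illustrative, not taken from the repo):
    #   python convert_txt2img_unclip_to_image_variation.py --dump_path ./unclip-image-variation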
| 278
| 0
|
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n                \"ner_tags\": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            \"O\",\n                            \"B-PER\",\n                            \"I-PER\",\n                            \"B-ORG\",\n                            \"I-ORG\",\n                            \"B-LOC\",\n                            \"I-LOC\",\n                        ]\n                    )\n                ),\n                \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n                \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, \"r\", encoding=\"utf-8\") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_path)
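# Illustrative use from a test (a sketch only; the real consumers live elsewhere in the suite):
#   def test_dummy_dataset(dataset_loading_script_dir):
#       builder = datasets.load_dataset_builder(dataset_loading_script_dir)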
| 715
|
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137
def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Equation parameter: flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
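    # Quick smoke test, assuming the sibling `haversine_distance` module is importable.
    # The coordinate pairs (San Francisco and Yosemite) are the ones used by the
    # original module's doctests; only the printed value is inspected here.
    print(lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521))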
| 367
| 0
|
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples
    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )
    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
| 403
|
def counting_sort(collection):
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how often a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered
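# Worked example: counting_sort([4, 1, 3]) -> counts [1, 0, 1, 1] for keys 1..4,
# prefix sums [1, 1, 2, 3], output [1, 3, 4]; equal keys keep their input order.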
def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(counting_sort(unsorted))
| 403
| 1
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(vocab_size=vocab_size, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu")

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
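    # Example invocation (file names are illustrative):
    #   python convert_m2m100_original_checkpoint_to_pytorch.py model.pt ./m2m100-converted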
| 713
|
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial with coefficients `poly` at point x, term by term."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial at x using Horner's method."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
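# Horner's method folds c0 + c1*x + ... + cn*x^n into n multiply-adds instead of
# computing each power separately; e.g. for poly = (0.0, 0.0, 5.0, 9.3, 7.0) and
# x = 10.0, both functions return 5*100 + 9.3*1000 + 7*10000 = 79800.0.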
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = (0.0, 0.0, 5.0, 9.3, 7.0)
_lowerCamelCase : int = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 512
| 0
|
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3
def primitive_root(p_val: int) -> int:
    print("""Generating primitive root of p""")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g
def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("""Generating prime p...""")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program.")
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")
def main() -> None:
    print("""Making key files...""")
    make_key_files("""elgamal""", 2048)
    print("""Key files generation successful""")
if __name__ == "__main__":
main()
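# Running this module writes "elgamal_pubkey.txt" and "elgamal_privkey.txt" containing
# 2048-bit key material; it aborts instead of overwriting existing key files.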
| 593
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_yolos'''] = ['''YolosFeatureExtractor''']
    _import_structure['''image_processing_yolos'''] = ['''YolosImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_yolos'''] = [
'''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''YolosForObjectDetection''',
'''YolosModel''',
'''YolosPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
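    # With the _LazyModule replacement above, `import transformers.models.yolos` stays cheap:
    # heavy submodules are only imported when an attribute such as `YolosModel` is first accessed.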
| 593
| 1
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify a different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify a different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
| 703
|
import cv2
import numpy as np
class HarrisCorner:
'''simple docstring'''
    def __init__(self, k: float, window_size: int):
        # k: Harris detector free parameter, empirically in the interval [0.04, 0.06]
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("""invalid k value""")
def __str__( self : int ):
return str(self.k )
    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        # entries of the 2x2 structure tensor M, computed per pixel
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                # Harris response: R = det(M) - k * trace(M)^2
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("""path_to_image""")
    cv2.imwrite("""detect.png""", color_img)
| 670
| 0
|
'''simple docstring'''
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44
|
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 44
| 1
|
'''simple docstring'''
import itertools
import math
def is_prime(number: int) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
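# The 6k +/- 1 stride works because every prime p > 3 satisfies p % 6 in {1, 5},
# so it suffices to test divisors i and i + 2 for i = 5, 11, 17, ...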
def prime_generator():
    '''Generate an endless sequence of prime numbers.'''
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    '''Return the nth prime number.'''
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(F"""{solution() = }""")
| 280
|
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    '''Find a root of `func` (an expression in x) starting from the point a.'''
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
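# Newton-Raphson update rule: x_{n+1} = x_n - f(x_n) / f'(x_n), with f' obtained
# symbolically via sympy.diff; convergence is quadratic near a simple root.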
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}""")
# Find Square Root of 5
print(F"""The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}""")
# Exponential Roots
print(F"""The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}""")
| 280
| 1
|
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Visual-Attention-Network/van-base': (
'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'
),
}
class VanConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''van'''

    def __init__(self, image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4], hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-6, layer_scale_init_value=1e-2, drop_path_rate=0.0, dropout_rate=0.0, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
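# Minimal usage sketch of the restored class (attribute values follow the defaults above):
#   config = VanConfig()
#   assert config.hidden_sizes == [64, 128, 320, 512]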
| 365
|
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin , ConfigMixin , ModuleUtilsMixin ):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
    def __init__( self , prefix_length: int , prefix_inner_dim: int , prefix_hidden_dim: Optional[int] = None , vocab_size: int = 50257 , n_positions: int = 1024 , n_embd: int = 768 , n_layer: int = 12 , n_head: int = 12 , n_inner: Optional[int] = None , activation_function: str = "gelu_new" , resid_pdrop: float = 0.1 , embd_pdrop: float = 0.1 , attn_pdrop: float = 0.1 , layer_norm_epsilon: float = 1e-5 , initializer_range: float = 0.02 , scale_attn_weights: bool = True , use_cache: bool = True , scale_attn_by_inverse_layer_idx: bool = False , reorder_and_upcast_attn: bool = False , ):
        super().__init__()
        self.prefix_length = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal." )
        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim , n_embd ) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        gpt_config = GPT2Config(
            vocab_size=vocab_size , n_positions=n_positions , n_embd=n_embd , n_layer=n_layer , n_head=n_head , n_inner=n_inner , activation_function=activation_function , resid_pdrop=resid_pdrop , embd_pdrop=embd_pdrop , attn_pdrop=attn_pdrop , layer_norm_epsilon=layer_norm_epsilon , initializer_range=initializer_range , scale_attn_weights=scale_attn_weights , use_cache=use_cache , scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx , reorder_and_upcast_attn=reorder_and_upcast_attn , )
        self.transformer = GPT2LMHeadModel(gpt_config )
    def forward(self , input_ids , prefix_embeds , attention_mask=None , labels=None ):
        embedding_text = self.transformer.transformer.wte(input_ids )
        hidden = self.encode_prefix(prefix_embeds )
        prefix_embeds = self.decode_prefix(hidden )
        embedding_cat = torch.cat((prefix_embeds, embedding_text) , dim=1 )
        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            labels = torch.cat((dummy_token, input_ids) , dim=1 )
        out = self.transformer(inputs_embeds=embedding_cat , labels=labels , attention_mask=attention_mask )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token(self , batch_size , device ) -> torch.Tensor:
        return torch.zeros(batch_size , self.prefix_length , dtype=torch.int64 , device=device )
    def encode(self , prefix ):
        return self.encode_prefix(prefix )
@torch.no_grad()
    def generate_captions(self , features , eos_token_id , device ):
        features = torch.split(features , 1 , dim=0 )
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device ) )  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature , device=device , eos_token_id=eos_token_id )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        generated_tokens = torch.stack(generated_tokens )
        generated_seq_lengths = torch.stack(generated_seq_lengths )
        return generated_tokens, generated_seq_lengths
@torch.no_grad()
    def generate_beam(self , input_ids=None , input_embeds=None , device=None , beam_size: int = 5 , entry_length: int = 67 , temperature: float = 1.0 , eos_token_id=None , ):
        # Generates text until the stop token is reached, using beam search with the desired beam size.
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size , device=device , dtype=torch.int )
        is_stopped = torch.zeros(beam_size , device=device , dtype=torch.bool )
        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids )
        for i in range(entry_length ):
            outputs = self.transformer(inputs_embeds=generated )
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1 ).log()
            if scores is None:
                scores, next_tokens = logits.topk(beam_size , -1 )
                generated = generated.expand(beam_size , *generated.shape[1:] )
                next_tokens, scores = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size , *tokens.shape[1:] )
                    tokens = torch.cat((tokens, next_tokens) , dim=1 )
            else:
                logits[is_stopped] = -float(np.inf )
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1 ).topk(beam_size , -1 )
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1 )
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens) , dim=1 )
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            generated = torch.cat((generated, next_token_embed) , dim=1 )
            is_stopped = is_stopped + next_tokens.eq(stop_token_index ).squeeze()
            if is_stopped.all():
                break
        scores = scores / seq_lengths
        order = scores.argsort(descending=True )
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts , dim=0 )
        seq_lengths = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
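# Usage sketch (added; the variable names are illustrative): given CLIP-style
# prefix features `feats` of shape (batch, prefix_inner_dim), calling
# `decoder.generate_captions(feats, eos_token_id, device)` returns the
# best-beam token ids and their sequence lengths, one per input sample.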
| 657
| 0
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase ):
    def setUp(self ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
        processor = BlipProcessor(image_processor , tokenizer )
        processor.save_pretrained(self.tmpdirname )
    def get_tokenizer(self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer
    def get_image_processor(self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def tearDown(self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self ):
        """Creates a list of random PIL images."""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self ):
        processor = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = BlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )
    def test_image_processor(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str , return_token_type_ids=False )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 718
|
"""simple docstring"""
from __future__ import annotations
def slowsort(sequence: list , start: int | None = None , end: int | None = None ) -> None:
    """Sorts ``sequence`` in place between indices ``start`` and ``end`` (inclusive)."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence , start , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
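    # Illustrative usage (added): slowsort sorts the list in place.
    example = [5, 2, 4, 1, 3]
    slowsort(example )
    print(example )  # [1, 2, 3, 4, 5]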
| 533
| 0
|
import re
from filelock import FileLock
try:
    import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str ) -> str:
    x = re.sub("<n>" , "" , x )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x ) )
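# Example (added): add_newline_to_end_of_each_sentence("A b. C d.") returns
# "A b.\nC d.", i.e. one sentence per line, the format expected by
# ROUGE-Lsum style scoring.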
| 108
|
def find_min(arr: list ) -> int:
    """Returns the minimum possible difference between the sums of two subsets of ``arr``."""
    n = len(arr )
    s = sum(arr )
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(1 , n + 1 ):
        dp[i][0] = True
    for i in range(1 , s + 1 ):
        dp[0][i] = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            dp[i][j] = dp[i][j - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
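# Illustrative check (added): for [1, 6, 11, 5] the best split is {1, 5, 6} vs
# {11}, so find_min([1, 6, 11, 5]) returns |12 - 11| = 1.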
| 487
| 0
|
"""simple docstring"""
def solution(max_base: int = 10 , max_power: int = 22 ) -> int:
    """Returns the count of n-digit positive integers that are also an n-th power."""
    bases = range(1 , max_base )
    powers = range(1 , max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
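# Worked example (added): 16807 = 7**5 has exactly five digits, so it is one of
# the base**power values counted above.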
if __name__ == "__main__":
print(F"{solution(10, 22) = }")
| 681
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 681
| 1
|
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params , i , prefix ):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def tax_attention_lookup(params , i , prefix , layer_name="attention" ):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :] )
    k = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :] )
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :] )
    q = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :] )
    v = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
    return k, o, q, v
def tax_mlp_lookup(params , i , prefix , split_mlp_wi=False ):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def tax_layer_norm_lookup(params , i , prefix , layer_name ):
    """Returns the layer norm parameters of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_tax_to_pytorch(variables , * , num_layers , is_encoder_only , scalable_attention=False ):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"] )
    old = {"/".join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:" , split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old , i , "encoder" , "pre_attention_layer_norm" )
        k, o, q, v = tax_attention_lookup(old , i , "encoder" , "attention" )
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old , i , "encoder" , "pre_mlp_layer_norm" )
        wi, wo = tax_mlp_lookup(old , i , "encoder" , split_mlp_wi )
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                old , i , "encoder" ).T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old , 0 , "encoder" ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old , 0 , "decoder" ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old , i , "decoder" , "pre_self_attention_layer_norm" )
            k, o, q, v = tax_attention_lookup(old , i , "decoder" , "self_attention" )
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old , i , "decoder" , "pre_cross_attention_layer_norm" )
            k, o, q, v = tax_attention_lookup(old , i , "decoder" , "encoder_decoder_attention" )
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old , i , "decoder" , "pre_mlp_layer_norm" )
            wi, wo = tax_mlp_lookup(old , i , "decoder" , split_mlp_wi )
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(old , i , "decoder" ).T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params , is_encoder_only ):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head." )
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only , scalable_attention ):
    """Replaces the params in ``model`` with the converted T5X params."""
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(
        variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only , scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only: bool = False , scalable_attention: bool = False , ):
    config = MT5Config.from_json_file(config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config )
    else:
        model = UMT5ForConditionalGeneration(config )
    # Load weights from the T5X checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only , scalable_attention )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print('''Done''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
__snake_case = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
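# Example invocation (added sketch; the script file name is hypothetical):
#   python convert_umt5_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file config.json \
#       --pytorch_dump_path ./umt5-converted \
#       --scalable_attention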
| 658
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class FocalNetConfig(BackboneConfigMixin , PretrainedConfig ):
    model_type = "focalnet"
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , use_conv_embed=False , hidden_sizes=[192, 384, 768, 768] , depths=[2, 2, 6, 2] , focal_levels=[2, 2, 2, 2] , focal_windows=[3, 3, 3, 3] , hidden_act="gelu" , mlp_ratio=4.0 , hidden_dropout_prob=0.0 , drop_path_rate=0.1 , use_layerscale=False , layerscale_value=1e-4 , use_post_layernorm=False , use_post_layernorm_in_modulation=False , normalize_modulator=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
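        # Example (added): with depths=[2, 2, 6, 2] the stage names above are
        # ["stem", "stage1", "stage2", "stage3", "stage4"].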
| 658
| 1
|
def depth_first_search(grid: list[list[int]] , row: int , col: int , visit: set ) -> int:
    """Counts the simple paths from (row, col) to the bottom-right corner, avoiding cells marked 1."""
    row_length, col_length = len(grid ), len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col) )
    count = 0
    count += depth_first_search(grid , row + 1 , col , visit )
    count += depth_first_search(grid , row - 1 , col , visit )
    count += depth_first_search(grid , row , col + 1 , visit )
    count += depth_first_search(grid , row , col - 1 , visit )
    visit.remove((row, col) )
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
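    # Illustrative run (added): a 2x2 obstacle-free grid has exactly two simple
    # paths from the top-left to the bottom-right corner.
    print(depth_first_search([[0, 0], [0, 0]] , 0 , 0 , set() ))  # 2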
| 706
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ : Any = logging.get_logger(__name__)
A__ : Union[str, Any] = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig ):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=300 , max_position_embeddings=1024 , encoder_layers=6 , encoder_ffn_dim=1024 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=1024 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , num_feature_levels=4 , encoder_n_points=4 , decoder_n_points=4 , two_stage=False , two_stage_num_proposals=300 , with_box_refine=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , disable_custom_kernels=False , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads(self ) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size(self ) -> int:
        return self.d_model
    def to_dict(self ):
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 244
| 0
|
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int , den: int ) -> bool:
    """Checks whether naively cancelling the shared digit leaves the value unchanged."""
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int ) -> list[str]:
    """Returns the digit-cancelling fractions with numerators below 10**digit_len."""
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len )
    for num in range(den , last_digit ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(f"""{num}/{den}""" )
            den += 1
        num += 1
        den = 10
    return solutions
def solution(n: int = 2 ) -> int:
    """Returns the denominator of the product of the digit-cancelling fractions, in lowest terms."""
    result = 1.0
    for fraction in fraction_list(n ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
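# Worked example (added): 49/98 "cancels" to 4/8 = 1/2 and indeed 49/98 = 1/2,
# so it appears in fraction_list(2).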
if __name__ == "__main__":
print(solution())
| 424
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_xlm_roberta_base(self ):
        model = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )['''last_hidden_state'''].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )
@slow
    def test_xlm_roberta_large(self ):
        model = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' )
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )['''last_hidden_state'''].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )
| 189
| 0
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
    def test_download_only_pytorch(self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                """hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=None , cache_dir=tmpdirname )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname , os.listdir(tmpdirname )[0] , """snapshots""" ) )]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(""".bin""" ) for f in files )
@slow
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ):
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            """hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=None )
A_ : List[Any] = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
A_ : Optional[int] = jax.random.PRNGKey(0 )
A_ : Dict = 4
A_ : Dict = jax.device_count()
A_ : Tuple = num_samples * [prompt]
A_ : Any = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
A_ : Any = replicate(__lowercase )
A_ : Optional[Any] = jax.random.split(__lowercase , __lowercase )
A_ : Optional[int] = shard(__lowercase )
A_ : List[str] = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 4.151_4745 ) < 1E-3
            assert np.abs(np.abs(images , dtype=np.float32 ).sum() - 4_9947.875 ) < 5E-1
A_ : Dict = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__lowercase ) == num_samples
def _lowerCamelCase ( self ):
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , revision="""flax""" , safety_checker=None )
A_ : Tuple = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
A_ : List[str] = jax.random.PRNGKey(0 )
A_ : Optional[Any] = 50
A_ : Optional[Any] = jax.device_count()
A_ : str = num_samples * [prompt]
A_ : Dict = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
A_ : Tuple = replicate(__lowercase )
A_ : Union[str, Any] = jax.random.split(__lowercase , __lowercase )
A_ : List[str] = shard(__lowercase )
A_ : List[Any] = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.0565_2401) ) < 1E-3
            assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 238_3808.2) ) < 5E-1
def _lowerCamelCase ( self ):
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloat16 , safety_checker=None )
A_ : List[str] = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
A_ : str = jax.random.PRNGKey(0 )
A_ : List[Any] = 50
A_ : Dict = jax.device_count()
A_ : Any = num_samples * [prompt]
A_ : Optional[int] = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
A_ : Any = replicate(__lowercase )
A_ : Optional[int] = jax.random.split(__lowercase , __lowercase )
A_ : int = shard(__lowercase )
A_ : Any = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.0400_3906) ) < 1E-3
            assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 237_3516.75) ) < 5E-1
def _lowerCamelCase ( self ):
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloat16 )
A_ : str = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
A_ : Optional[int] = jax.random.PRNGKey(0 )
A_ : Optional[Any] = 50
A_ : List[Any] = jax.device_count()
A_ : Tuple = num_samples * [prompt]
A_ : str = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
A_ : int = replicate(__lowercase )
A_ : Any = jax.random.split(__lowercase , __lowercase )
A_ : Tuple = shard(__lowercase )
A_ : Tuple = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.0400_3906) ) < 1E-3
            assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 237_3516.75) ) < 5E-1
def _lowerCamelCase ( self ):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , set_alpha_to_one=False , steps_offset=1 , )
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloat16 , scheduler=scheduler , safety_checker=None , )
        scheduler_state = scheduler.create_state()
        params["""scheduler"""] = scheduler_state
A_ : int = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
A_ : Any = jax.random.PRNGKey(0 )
A_ : List[str] = 50
A_ : Union[str, Any] = jax.device_count()
A_ : List[Any] = num_samples * [prompt]
A_ : str = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
A_ : str = replicate(__lowercase )
A_ : Optional[int] = jax.random.split(__lowercase , __lowercase )
A_ : Dict = shard(__lowercase )
A_ : List[str] = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.0_4504_3945) ) < 1E-3
            assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 234_7693.5) ) < 5E-1
def _lowerCamelCase ( self ):
A_ : Optional[int] = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
A_ : int = jax.device_count()
A_ : Optional[int] = num_samples * [prompt]
A_ : Dict = jax.random.split(jax.random.PRNGKey(0 ) , __lowercase )
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloat16 , safety_checker=None , )
A_ : List[str] = replicate(__lowercase )
A_ : Union[str, Any] = pipeline.prepare_inputs(__lowercase )
A_ : int = shard(__lowercase )
A_ : Optional[int] = pipeline(__lowercase , __lowercase , __lowercase , jit=__lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloat16 , safety_checker=None , use_memory_efficient_attention=True , )
A_ : str = replicate(__lowercase )
A_ : Tuple = pipeline.prepare_inputs(__lowercase )
A_ : str = shard(__lowercase )
A_ : List[Any] = pipeline(__lowercase , __lowercase , __lowercase , jit=__lowercase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 715
|
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , mask_ratio=0.6 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
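        # Example (added): image_size=30, patch_size=2 gives 225 patches, and
        # with mask_ratio=0.6 the sequence length is ceil(0.4 * 226) = 91.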
    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def create_and_check_model(self , config , pixel_values , labels ):
        model = ViTMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining(self , config , pixel_values , labels ):
        model = ViTMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self ):
        self.model_tester = ViTMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37 )
    def test_config(self ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
    def test_inputs_embeds(self ):
        pass
    def test_model_common_attributes(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def check_pt_tf_models(self , tf_model , pt_model , pt_inputs_dict ):
        # make masks reproducible
        np.random.seed(2 )
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        pt_noise = torch.from_numpy(noise )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["""noise"""] = pt_noise
        super().check_pt_tf_models(tf_model , pt_model , pt_inputs_dict )
    def test_save_load(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2 )
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2 )] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model = model_class.from_pretrained(tmpdirname )
                model.to(torch_device )
                # make random mask reproducible
                torch.manual_seed(2 )
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1 )] = 0
                max_diff = np.amax(np.abs(out_1 - out_2 ) )
                self.assertLessEqual(max_diff , 1E-5 )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def _lowerCamelCase ( self ):
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def _lowerCamelCase ( self ):
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def _lowerCamelCase ( self ):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def _lowerCamelCase ( self ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _lowerCamelCase ( self ):
pass
@slow
    def test_model_from_pretrained(self ):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase ):
@cached_property
    def default_image_processor(self ):
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
    def test_inference_for_pretraining(self ):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2 )
        model = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        noise = np.random.uniform(size=(1, num_patches) )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs , noise=torch.from_numpy(noise ).to(device=torch_device ) )
        # verify the logits
        expected_shape = torch.Size((1, 196, 768) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(torch_device ) , atol=1E-4 ) )
| 481
| 0
|
"""simple docstring"""
import enum
import shutil
import sys
TERMINAL_WIDTH , _ = shutil.get_terminal_size()
SCREAMING_SNAKE_CASE__:int = {"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""}
class Direction(enum.Enum ):
    UP = 0
    DOWN = 1
def forceWrite(content , end="" ):
    sys.stdout.write(str(content ) + end )
    sys.stdout.flush()
def writeColor(content , color , end="" ):
    forceWrite(F"\u001b[{color}m{content}\u001b[0m" , end )
def reset_cursor():
    forceWrite("\r" )
def move_cursor(num_lines , direction ):
    forceWrite(F"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}" )
def clear_line():
    forceWrite(" " * TERMINAL_WIDTH )
    reset_cursor()
def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH )
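# Usage sketch (added): writeColor("Done", 32) prints "Done" in ANSI green, and
# move_cursor(2, "UP") moves the terminal cursor two lines up.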
| 528
|
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str , map_location: str = "cpu" , save_path: Union[str, None] = None ) -> None:
    """Loads a saved state dict, casts every tensor to fp16 and writes it back."""
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
if __name__ == "__main__":
fire.Fire(convert)
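# Usage sketch (added; the script file name is hypothetical):
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin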
| 528
| 1
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig ):
    model_type = 'gpt_neo'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
    def __init__( self , vocab_size=50257 , max_position_embeddings=2048 , hidden_size=2048 , num_layers=24 , attention_types=[[["global", "local"], 12]] , num_heads=16 , intermediate_size=None , window_size=256 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types )
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                """Configuration for convolutional module is incorrect. """
                """It is required that `len(config.attention_layers)` == `config.num_layers` """
                f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
                f'''`config.num_layers = {self.num_layers}`. '''
                """`config.attention_layers` is prepared using `config.attention_types`. """
                """Please verify the value of `config.attention_types` argument.""" )
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
@staticmethod
def lowerCAmelCase_ ( lowerCamelCase ) -> Tuple:
snake_case_ = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def UpperCamelCase( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
import torch
snake_case_ = input.size()
snake_case_ = len(lowercase_ )
snake_case_ = shape[dimension]
snake_case_ = torch.arange(0 , lowercase_ , lowercase_ )
snake_case_ = torch.div(sizedim - size , lowercase_ , rounding_mode="""floor""" ) + 1
snake_case_ = torch.arange(lowercase_ ) + low_indices[:min_length][:, None]
snake_case_ = [slice(lowercase_ )] * rank
snake_case_ = indices
snake_case_ = input[s]
snake_case_ = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(lowercase_ )
def UpperCamelCase( lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
import torch
snake_case_ = torch.arange(1 , lowercase_ )
snake_case_ = torch.remainder(lowercase_ , lowercase_ )
snake_case_ = remainders == 0
snake_case_ = candidates[divisor_indices]
snake_case_ = torch.max(lowercase_ )
return largest_divisor, torch.div(lowercase_ , lowercase_ , rounding_mode="""floor""" )
class __lowerCamelCase ( __snake_case ):
@property
def lowerCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
snake_case_ = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase , direction="""inputs""" )
snake_case_ = {0: """batch""", 1: """past_sequence + sequence"""}
else:
snake_case_ = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowerCAmelCase_ ( self ) -> int:
return self._config.num_heads
def lowerCAmelCase_ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ) -> Mapping[str, Any]:
snake_case_ = super(lowerCamelCase , self ).generate_dummy_inputs(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
# We need to order the input in the way they appears in the forward()
snake_case_ = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
snake_case_ , snake_case_ = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
snake_case_ = seqlen + 2
snake_case_ = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
snake_case_ = [
(torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(self.num_layers )
]
snake_case_ = common_inputs["""attention_mask"""]
if self.use_past:
snake_case_ = ordered_inputs["""attention_mask"""].dtype
snake_case_ = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(lowerCamelCase , lowerCamelCase , dtype=lowerCamelCase )] , dim=1 )
return ordered_inputs
@property
def lowerCAmelCase_ ( self ) -> int:
return 13
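# Illustrative sketch: what the default `attention_types` unrolls to via
# `expand_attention_types_params` -- one 'global'/'local' entry per layer.
attention_types = [[["global", "local"], 12]]
attention_layers = []
for item in attention_types:
    for _ in range(item[1]):
        attention_layers.extend(item[0])
print(len(attention_layers))  # 24, matching the num_layers default
print(attention_layers[:4])   # ['global', 'local', 'global', 'local']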
| 161
|
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( __snake_case , unittest.TestCase ):
lowerCamelCase_ : Dict = PegasusTokenizer
lowerCamelCase_ : Optional[int] = PegasusTokenizerFast
lowerCamelCase_ : List[Any] = True
lowerCamelCase_ : Optional[int] = True
def lowerCAmelCase_ ( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCAmelCase_ ( self ) -> str:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def lowerCAmelCase_ ( self , **lowerCamelCase ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def lowerCAmelCase_ ( self , lowerCamelCase ) -> Optional[Any]:
return ("This is a test", "This is a test")
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case_ = """</s>"""
snake_case_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase )
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(lowerCamelCase ) , 1103 )
def lowerCAmelCase_ ( self ) -> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case_ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
snake_case_ = self.tokenizer_class.from_pretrained(self.tmpdirname )
snake_case_ = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
snake_case_ = rust_tokenizer([raw_input_str] , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase ).input_ids[0]
snake_case_ = py_tokenizer([raw_input_str] , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase ).input_ids[0]
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case_ = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
snake_case_ = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
snake_case_ = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
snake_case_ = tokenizer([raw_input_str] , return_tensors=lowerCamelCase ).input_ids[0]
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case_ = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
snake_case_ = """To ensure a smooth flow of bank resolutions."""
snake_case_ = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
snake_case_ = tokenizer([raw_input_str] , return_tensors=lowerCamelCase ).input_ids[0]
self.assertListEqual(lowerCamelCase , lowerCamelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case_ = ["""This is going to be way too long.""" * 150, """short example"""]
snake_case_ = ["""not super long but more than 5 tokens""", """tiny"""]
snake_case_ = self._large_tokenizer(lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase , return_tensors="""pt""" )
snake_case_ = self._large_tokenizer(
text_target=lowerCamelCase , max_length=5 , padding=lowerCamelCase , truncation=lowerCamelCase , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCamelCase ) == 2 # input_ids, attention_mask.
@slow
def lowerCAmelCase_ ( self ) -> Optional[int]:
# fmt: off
snake_case_ = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( __snake_case , unittest.TestCase ):
lowerCamelCase_ : Optional[Any] = PegasusTokenizer
lowerCamelCase_ : int = PegasusTokenizerFast
lowerCamelCase_ : Optional[int] = True
lowerCamelCase_ : int = True
def lowerCAmelCase_ ( self ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCAmelCase_ ( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def lowerCAmelCase_ ( self , **lowerCamelCase ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def lowerCAmelCase_ ( self , lowerCamelCase ) -> int:
return ("This is a test", "This is a test")
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case_ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
snake_case_ = self.tokenizer_class.from_pretrained(self.tmpdirname )
snake_case_ = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
snake_case_ = rust_tokenizer([raw_input_str] , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase ).input_ids[0]
snake_case_ = py_tokenizer([raw_input_str] , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase ).input_ids[0]
self.assertListEqual(lowerCamelCase , lowerCamelCase )
@require_torch
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case_ = ["""This is going to be way too long.""" * 1000, """short example"""]
snake_case_ = ["""not super long but more than 5 tokens""", """tiny"""]
snake_case_ = self._large_tokenizer(lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase , return_tensors="""pt""" )
snake_case_ = self._large_tokenizer(
text_target=lowerCamelCase , max_length=5 , padding=lowerCamelCase , truncation=lowerCamelCase , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCamelCase ) == 2 # input_ids, attention_mask.
def lowerCAmelCase_ ( self ) -> int:
snake_case_ = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
snake_case_ = self._large_tokenizer(lowerCamelCase ).input_ids
self.assertListEqual(
lowerCamelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
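# Illustrative sketch of the id layout the asserts above rely on: Pegasus
# reserves the first `offset` ids for special tokens and shifts every
# SentencePiece piece id up by that amount (96000 is the implied SP vocab size).
offset = 103                  # ids 0..102 are reserved (0=<pad>, 1=</s>, 2=<mask_1>, 3=<mask_2>, ...)
vocab_size = 96_000 + offset  # == 96103, as asserted above
def sp_id_to_pegasus_id(sp_id: int) -> int:
    return sp_id + offset     # regular pieces live past the reserved block
print(vocab_size, sp_id_to_pegasus_id(2))  # 96103 105 -- SP's <unk> (piece 2) lands at offset + 2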
| 161
| 1
|
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
a__ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : int , a__ : bool , a__ : Optional[int] = None , a__ : Optional[int] = None ):
super().__init__()
UpperCAmelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
UpperCAmelCase = torch.zeros(a__ , a__ )
else:
UpperCAmelCase = None
UpperCAmelCase = torch.nn.Parameter(a__ )
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =42
_lowerCamelCase =42
_lowerCamelCase =42
_lowerCamelCase =42
_lowerCamelCase =42
_lowerCamelCase =42
def __init__( self : List[str] , a__ : VQModel , a__ : CLIPTextModel , a__ : CLIPTokenizer , a__ : TransformeraDModel , a__ : VQDiffusionScheduler , a__ : LearnedClassifierFreeSamplingEmbeddings , ):
super().__init__()
self.register_modules(
vqvae=a__ , transformer=a__ , text_encoder=a__ , tokenizer=a__ , scheduler=a__ , learned_classifier_free_sampling_embeddings=a__ , )
def __snake_case ( self : Any , a__ : Dict , a__ : Optional[int] , a__ : Optional[Any] ):
UpperCAmelCase = len(a__ ) if isinstance(a__ , a__ ) else 1
# get prompt text embeddings
UpperCAmelCase = self.tokenizer(
a__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
UpperCAmelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCAmelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
UpperCAmelCase = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCAmelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
UpperCAmelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=a__ )
# duplicate text embeddings for each generation per prompt
UpperCAmelCase = prompt_embeds.repeat_interleave(a__ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
UpperCAmelCase = self.learned_classifier_free_sampling_embeddings.embeddings
UpperCAmelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(a__ , 1 , 1 )
else:
UpperCAmelCase = [''''''] * batch_size
UpperCAmelCase = text_input_ids.shape[-1]
UpperCAmelCase = self.tokenizer(
a__ , padding='''max_length''' , max_length=a__ , truncation=a__ , return_tensors='''pt''' , )
UpperCAmelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
UpperCAmelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=a__ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase = negative_prompt_embeds.shape[1]
UpperCAmelCase = negative_prompt_embeds.repeat(1 , a__ , 1 )
UpperCAmelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , a__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self : Optional[Any] , a__ : Union[str, List[str]] , a__ : int = 100 , a__ : float = 5.0 , a__ : float = 1.0 , a__ : int = 1 , a__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a__ : Optional[torch.FloatTensor] = None , a__ : Optional[str] = "pil" , a__ : bool = True , a__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a__ : int = 1 , ):
if isinstance(a__ , a__ ):
UpperCAmelCase = 1
elif isinstance(a__ , a__ ):
UpperCAmelCase = len(a__ )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(a__ )}" )
UpperCAmelCase = batch_size * num_images_per_prompt
UpperCAmelCase = guidance_scale > 1.0
UpperCAmelCase = self._encode_prompt(a__ , a__ , a__ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(a__ , a__ ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(a__ )}." )
# get the initial completely masked latents unless the user supplied it
UpperCAmelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
UpperCAmelCase = self.transformer.num_vector_embeds - 1
UpperCAmelCase = torch.full(a__ , a__ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
                    '''Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,'''
f" {self.transformer.num_vector_embeds - 1} (inclusive)." )
UpperCAmelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(a__ , device=self.device )
UpperCAmelCase = self.scheduler.timesteps.to(self.device )
UpperCAmelCase = latents
for i, t in enumerate(self.progress_bar(a__ ) ):
# expand the sample if we are doing classifier free guidance
UpperCAmelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
UpperCAmelCase = self.transformer(a__ , encoder_hidden_states=a__ , timestep=a__ ).sample
if do_classifier_free_guidance:
UpperCAmelCase, UpperCAmelCase = model_output.chunk(2 )
UpperCAmelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(a__ , dim=1 , keepdim=a__ )
UpperCAmelCase = self.truncate(a__ , a__ )
# remove `log(0)`'s (`-inf`s)
UpperCAmelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase = self.scheduler.step(a__ , timestep=a__ , sample=a__ , generator=a__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(a__ , a__ , a__ )
UpperCAmelCase = self.vqvae.config.vq_embed_dim
UpperCAmelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
UpperCAmelCase = self.vqvae.quantize.get_codebook_entry(a__ , shape=a__ )
UpperCAmelCase = self.vqvae.decode(a__ , force_not_quantize=a__ ).sample
UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase = self.numpy_to_pil(a__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a__ )
def __snake_case ( self : List[str] , a__ : torch.FloatTensor , a__ : float ):
UpperCAmelCase, UpperCAmelCase = torch.sort(a__ , 1 , descending=a__ )
UpperCAmelCase = torch.exp(a__ )
UpperCAmelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
UpperCAmelCase = torch.full_like(keep_mask[:, 0:1, :] , a__ )
UpperCAmelCase = torch.cat((all_true, keep_mask) , dim=1 )
UpperCAmelCase = keep_mask[:, :-1, :]
UpperCAmelCase = keep_mask.gather(1 , indices.argsort(1 ) )
UpperCAmelCase = log_p_x_0.clone()
UpperCAmelCase = -torch.inf # -inf = log(0)
return rv
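# Illustrative sketch of the log-space truncation in `truncate` above on a
# plain probability vector: keep the smallest top set whose cumulative mass
# reaches `truncation_rate`, zero out the rest (log(0) = -inf).
import torch
log_p = torch.log(torch.tensor([[0.5, 0.3, 0.1, 0.1]]))
truncation_rate = 0.7
sorted_log_p, indices = torch.sort(log_p, dim=1, descending=True)
keep = torch.exp(sorted_log_p).cumsum(dim=1) < truncation_rate
keep = torch.cat([torch.ones_like(keep[:, :1]), keep[:, :-1]], dim=1)  # always keep the argmax
keep = keep.gather(1, indices.argsort(1))  # back to the original ordering
truncated = log_p.clone()
truncated[~keep] = float("-inf")
print(truncated.exp())  # tensor([[0.5000, 0.3000, 0.0000, 0.0000]])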
| 51
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
a__ : Tuple = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["pixel_values"]
def __init__( self : int , a__ : bool = True , a__ : Dict[str, int] = None , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : bool = True , a__ : Dict[str, int] = None , a__ : bool = True , a__ : Union[int, float] = 1 / 255 , a__ : bool = True , a__ : bool = True , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , **a__ : Union[str, Any] , ):
super().__init__(**a__ )
UpperCAmelCase = size if size is not None else {'''shortest_edge''': 256}
UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ )
UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase = get_size_dict(a__ , param_name='''crop_size''' )
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = do_center_crop
UpperCAmelCase = crop_size
UpperCAmelCase = resample
UpperCAmelCase = do_rescale
UpperCAmelCase = rescale_factor
UpperCAmelCase = offset
UpperCAmelCase = do_normalize
UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __snake_case ( self : Dict , a__ : np.ndarray , a__ : Dict[str, int] , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Optional[int] , ):
UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ )
if "shortest_edge" in size:
UpperCAmelCase = get_resize_output_image_size(a__ , size['''shortest_edge'''] , default_to_square=a__ )
elif "height" in size and "width" in size:
UpperCAmelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ )
def __snake_case ( self : Union[str, Any] , a__ : np.ndarray , a__ : Dict[str, int] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ):
UpperCAmelCase = get_size_dict(a__ )
if "height" not in size or "width" not in size:
raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(a__ , size=(size['''height'''], size['''width''']) , data_format=a__ , **a__ )
def __snake_case ( self : List[str] , a__ : np.ndarray , a__ : Union[int, float] , a__ : bool = True , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Dict , ):
UpperCAmelCase = image.astype(np.floataa )
if offset:
UpperCAmelCase = image - (scale / 2)
return rescale(a__ , scale=a__ , data_format=a__ , **a__ )
def __snake_case ( self : int , a__ : np.ndarray , a__ : Union[float, List[float]] , a__ : Union[float, List[float]] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ):
return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ )
def __snake_case ( self : Any , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase = to_numpy_array(a__ )
if do_resize:
UpperCAmelCase = self.resize(image=a__ , size=a__ , resample=a__ )
if do_center_crop:
UpperCAmelCase = self.center_crop(a__ , size=a__ )
if do_rescale:
UpperCAmelCase = self.rescale(image=a__ , scale=a__ , offset=a__ )
if do_normalize:
UpperCAmelCase = self.normalize(image=a__ , mean=a__ , std=a__ )
UpperCAmelCase = to_channel_dimension_format(a__ , a__ )
return image
def __snake_case ( self : List[Any] , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : ChannelDimension = ChannelDimension.FIRST , **a__ : Any , ):
UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase = resample if resample is not None else self.resample
UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase = offset if offset is not None else self.offset
UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase = image_std if image_std is not None else self.image_std
UpperCAmelCase = size if size is not None else self.size
UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ )
UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase = get_size_dict(a__ , param_name='''crop_size''' )
if not valid_images(a__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
UpperCAmelCase = make_batched(a__ )
UpperCAmelCase = [
[
self._preprocess_image(
image=a__ , do_resize=a__ , size=a__ , resample=a__ , do_center_crop=a__ , crop_size=a__ , do_rescale=a__ , rescale_factor=a__ , offset=a__ , do_normalize=a__ , image_mean=a__ , image_std=a__ , data_format=a__ , )
for img in video
]
for video in videos
]
UpperCAmelCase = {'''pixel_values''': videos}
return BatchFeature(data=a__ , tensor_type=a__ )
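# Illustrative usage of `make_batched` above: single frames, frame lists and
# nested video lists all normalize to List[List[image]].
import numpy as np
frame = np.zeros((224, 224, 3), dtype=np.uint8)
print(len(make_batched(frame)))               # 1 video with 1 frame
print(len(make_batched([frame, frame])))      # 1 video with 2 frames
print(len(make_batched([[frame], [frame]])))  # already batched: 2 videos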
| 51
| 1
|
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """Mark the function with the key code so it can be handled in the register."""
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func
    return decorator
def mark_multiple(*keys: List[str]):
    """Mark the function with the key codes so it can be handled in the register."""
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func
    return decorator
class KeyHandler(type):
    """Metaclass that maps any attribute carrying `handle_key` to the key handler."""
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists in the handler."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register(cls):
    """Adds the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
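# Illustrative usage of the decorator + metaclass machinery above (assumes an
# interactive terminal for `get_character`, so the dispatch call stays commented).
@register
class Menu:
    @mark("q")
    def quit(cls):
        return "quit"
    @mark_multiple("j", "k")
    def move(cls):
        return f"moved via {chr(cls.current_selection)}"
# Menu.handle_input(Menu) blocks on a key press and dispatches:
# "q" returns "quit"; "j" or "k" routes to move().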
| 675
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None
    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
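# Illustrative usage of `copy` above: nested fields are deep-copied, so
# mutating the clone leaves the original untouched.
config = DownloadConfig(proxies={"https": "http://10.0.0.1:3128"})
clone = config.copy()
clone.proxies["https"] = "http://10.0.0.2:3128"
print(config.proxies["https"])  # still http://10.0.0.1:3128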
| 675
| 1
|
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursion: maximum revenue from cutting a rod of length n."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revenue
def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down) dynamic programming variant."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)
def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev))
        max_rev[n] = max_revenue
    return max_rev[n]
def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up) dynamic programming variant."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]
def _enforce_args(n: int, prices: list):
    if n < 0:
        raise ValueError(f"n must be greater than or equal to 0. Got n = {n}")
    if n > len(prices):
        raise ValueError(
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}")
def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
    main()
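# Illustrative timing sketch: the naive recursion is O(2^n) while both DP
# variants are O(n^2); the numbers below are machine-dependent.
import timeit
prices = list(range(1, 16))
n = len(prices)
t_naive = timeit.timeit(lambda: naive_cut_rod_recursive(n, prices), number=3)
t_dp = timeit.timeit(lambda: bottom_up_cut_rod(n, prices), number=3)
print(f"naive: {t_naive:.4f}s  bottom-up: {t_dp:.4f}s")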
| 198
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase=False , __lowerCamelCase=False ) -> Union[str, Any]:
lowercase__ : Tuple = '''backbone.''' if is_semantic else ''''''
lowercase__ : Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"""{prefix}cls_token""", '''beit.embeddings.cls_token'''),
(f"""{prefix}patch_embed.proj.weight""", '''beit.embeddings.patch_embeddings.projection.weight'''),
(f"""{prefix}patch_embed.proj.bias""", '''beit.embeddings.patch_embeddings.projection.bias'''),
(f"""{prefix}pos_embed""", '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=False , __lowerCamelCase=False ) -> Union[str, Any]:
for i in range(config.num_hidden_layers ):
lowercase__ : Union[str, Any] = '''backbone.''' if is_semantic else ''''''
# queries, keys and values
lowercase__ : int = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""" )
lowercase__ : Optional[Any] = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""" )
lowercase__ : Dict = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""" )
lowercase__ : str = in_proj_weight[
: config.hidden_size, :
]
lowercase__ : str = q_bias
lowercase__ : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowercase__ : Tuple = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
lowercase__ : Any = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""" )
lowercase__ : Optional[int] = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""" )
lowercase__ : int = gamma_a
lowercase__ : List[str] = gamma_a
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
lowercase__ : str = dct.pop(__lowerCamelCase )
lowercase__ : Optional[Any] = val
def __UpperCAmelCase ( ) -> Tuple:
lowercase__ : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__ : Dict = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=False ) -> Optional[int]:
lowercase__ : List[Any] = False if '''rvlcdip''' in checkpoint_url else True
lowercase__ : Optional[int] = BeitConfig(use_absolute_position_embeddings=__lowerCamelCase , use_mask_token=__lowerCamelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
lowercase__ : Dict = 10_24
lowercase__ : Any = 40_96
lowercase__ : Optional[Any] = 24
lowercase__ : Union[str, Any] = 16
# labels
if "rvlcdip" in checkpoint_url:
lowercase__ : str = 16
lowercase__ : Optional[int] = '''huggingface/label-files'''
lowercase__ : int = '''rvlcdip-id2label.json'''
lowercase__ : Optional[Any] = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ : Optional[Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowercase__ : List[Any] = idalabel
lowercase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
lowercase__ : Optional[Any] = torch.hub.load_state_dict_from_url(__lowerCamelCase , map_location='''cpu''' )['''model''']
lowercase__ : int = create_rename_keys(__lowerCamelCase , has_lm_head=__lowerCamelCase )
for src, dest in rename_keys:
rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
read_in_q_k_v(__lowerCamelCase , __lowerCamelCase , has_lm_head=__lowerCamelCase )
# load HuggingFace model
lowercase__ : str = BeitForMaskedImageModeling(__lowerCamelCase ) if has_lm_head else BeitForImageClassification(__lowerCamelCase )
model.eval()
model.load_state_dict(__lowerCamelCase )
# Check outputs on an image
lowercase__ : int = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__lowerCamelCase )
lowercase__ : List[Any] = prepare_img()
lowercase__ : Optional[int] = image_processor(images=__lowerCamelCase , return_tensors='''pt''' )
lowercase__ : int = encoding['''pixel_values''']
lowercase__ : str = model(__lowerCamelCase )
lowercase__ : Optional[int] = outputs.logits
# verify logits
lowercase__ : List[Any] = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 1_96, 81_92]
assert logits.shape == torch.Size(__lowerCamelCase ), "Shape of logits not as expected"
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
if has_lm_head:
lowercase__ : List[str] = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large'''
else:
lowercase__ : Tuple = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip'''
image_processor.push_to_hub(
repo_path_or_name=Path(__lowerCamelCase , __lowerCamelCase ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=__lowerCamelCase , )
model.push_to_hub(
repo_path_or_name=Path(__lowerCamelCase , __lowerCamelCase ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=__lowerCamelCase , )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
lowerCAmelCase_ = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
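# Illustrative sketch of the fused-to-split attention surgery in
# `read_in_q_k_v` above, with a toy hidden size of 8 instead of 768.
import torch
hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused [q; k; v]
q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_w = in_proj_weight[-hidden_size:, :]
assert q_w.shape == k_w.shape == v_w.shape == (hidden_size, hidden_size)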
| 560
| 0
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class a ( __lowerCAmelCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Any:
_A = SMALL_MODEL_IDENTIFIER
_A = """pt"""
_A = """tf"""
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
_A = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
_A = TFAutoModel.from_pretrained(self.test_model , from_pt=lowerCAmelCase_ )
model_tf.save_pretrained(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = """mock_framework"""
# Framework provided - return whatever the user provides
_A = FeaturesManager.determine_framework(self.test_model , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(lowerCAmelCase_ )
_A = FeaturesManager.determine_framework(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(lowerCAmelCase_ )
_A = FeaturesManager.determine_framework(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(lowerCAmelCase_ )
_A = FeaturesManager.determine_framework(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(lowerCAmelCase_ )
_A = FeaturesManager.determine_framework(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(lowerCAmelCase_ ):
_A = FeaturesManager.determine_framework(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> str:
_A = MagicMock(return_value=lowerCAmelCase_ )
with patch("""transformers.onnx.features.is_tf_available""" , lowerCAmelCase_ ):
_A = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowerCAmelCase_ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_A = MagicMock(return_value=lowerCAmelCase_ )
with patch("""transformers.onnx.features.is_torch_available""" , lowerCAmelCase_ ):
_A = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowerCAmelCase_ , self.framework_tf )
# Both in environment -> use PyTorch
_A = MagicMock(return_value=lowerCAmelCase_ )
_A = MagicMock(return_value=lowerCAmelCase_ )
with patch("""transformers.onnx.features.is_tf_available""" , lowerCAmelCase_ ), patch(
"""transformers.onnx.features.is_torch_available""" , lowerCAmelCase_ ):
_A = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowerCAmelCase_ , self.framework_pt )
# Both not in environment -> raise error
_A = MagicMock(return_value=lowerCAmelCase_ )
_A = MagicMock(return_value=lowerCAmelCase_ )
with patch("""transformers.onnx.features.is_tf_available""" , lowerCAmelCase_ ), patch(
"""transformers.onnx.features.is_torch_available""" , lowerCAmelCase_ ):
with self.assertRaises(lowerCAmelCase_ ):
_A = FeaturesManager.determine_framework(self.test_model )
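# Illustrative usage of the API under test; the model id is a placeholder and
# the call may hit the Hub to inspect the checkpoint.
from transformers.onnx import FeaturesManager
framework = FeaturesManager.determine_framework("hf-internal-testing/tiny-random-bert")
print(framework)  # "pt" with torch installed, "tf" with only TensorFlow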
| 721
|
MORSE_CODE_DICT = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
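# Illustrative round trip through the two functions above.
print(encrypt("SOS"))          # ... --- ...
print(decrypt("... --- ..."))  # SOS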
| 83
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : List[str] = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[Any] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
A__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
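# Illustrative sketch of what the `_LazyModule` pattern above buys: the heavy
# import is deferred until an attribute is first touched (a toy stand-in, not
# the actual _LazyModule implementation).
import importlib
class LazyAttr:
    def __init__(self, module_name):
        self._module_name = module_name
        self._module = None
    def __getattr__(self, name):
        if self._module is None:
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, name)
lazy_json = LazyAttr("json")
print(lazy_json.dumps({"a": 1}))  # the real import happens here, on first access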
| 286
|
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class snake_case__ ( unittest.TestCase ):
    def get_file_format(self, seed, shape):
        return f'gaussian_noise_s={seed}_shape={"_".join([str(s) for s in shape])}.npy'
def A_ ( self : Any ) -> List[str]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def A_ ( self : List[Any] , __a : Union[str, Any]=0 , __a : List[str]=(4, 4, 64, 64) , __a : Optional[Any]=False ) -> List[str]:
'''simple docstring'''
__snake_case : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
__snake_case : List[Any] = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a )
return image
def A_ ( self : Any , __a : Any=False , __a : Dict="CompVis/stable-diffusion-v1-4" ) -> Optional[int]:
'''simple docstring'''
__snake_case : Tuple = jnp.bfloataa if fpaa else jnp.floataa
__snake_case : int = 'bf16' if fpaa else None
__snake_case , __snake_case : Union[str, Any] = FlaxUNetaDConditionModel.from_pretrained(
__a , subfolder='unet' , dtype=__a , revision=__a )
return model, params
def A_ ( self : Any , __a : Dict=0 , __a : Dict=(4, 77, 768) , __a : List[str]=False ) -> List[Any]:
'''simple docstring'''
__snake_case : List[Any] = jnp.bfloataa if fpaa else jnp.floataa
__snake_case : Any = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[17, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 1000, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
def A_ ( self : List[Any] , __a : List[str] , __a : Union[str, Any] , __a : Any ) -> Dict:
'''simple docstring'''
__snake_case , __snake_case : Tuple = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=__a )
__snake_case : Tuple = self.get_latents(__a , fpaa=__a )
__snake_case : int = self.get_encoder_hidden_states(__a , fpaa=__a )
__snake_case : List[str] = model.apply(
{'params': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample
assert sample.shape == latents.shape
__snake_case : List[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__snake_case : str = jnp.array(__a , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(__a , __a , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[17, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 1000, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
def A_ ( self : str , __a : Optional[int] , __a : Union[str, Any] , __a : Tuple ) -> Optional[Any]:
'''simple docstring'''
__snake_case , __snake_case : int = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=__a )
__snake_case : int = self.get_latents(__a , shape=(4, 4, 96, 96) , fpaa=__a )
__snake_case : Optional[Any] = self.get_encoder_hidden_states(__a , shape=(4, 77, 1024) , fpaa=__a )
__snake_case : List[str] = model.apply(
{'params': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample
assert sample.shape == latents.shape
__snake_case : List[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__snake_case : int = jnp.array(__a , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__a , __a , atol=1e-2 )
| 286
| 1
|
"""simple docstring"""
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """Binarize a grayscale PIL image: pixels above the mean become 255, the rest 0."""
    width, height = image.size
    mean = 0
    pixels = image.load()
    for x in range(width):
        for y in range(height):
            mean += pixels[x, y]
    mean //= width * height
    for x in range(width):
        for y in range(height):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
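# Illustrative usage on a synthetic gradient instead of a file on disk
# (the paths above are placeholders).
import numpy as np
gradient = np.tile(np.arange(64, dtype=np.uint8) * 4, (64, 1))
binary = mean_threshold(Image.fromarray(gradient, mode="L"))
print(sorted(set(binary.getdata())))  # [0, 255] -- pure black and white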
| 135
|
"""simple docstring"""
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """Classic fourth-order Runge-Kutta integration of y' = f(x, y)."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
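# Illustrative usage: solve y' = y with y(0) = 1 on [0, 1]; the exact value at
# x = 1 is e ~= 2.71828, and RK4 is fourth-order accurate in h.
solution = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(solution[-1])  # ~2.71828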
| 135
| 1
|
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 424
|
'''simple docstring'''
import operator as op
def solve(post_fix):
    """Evaluate a space-separated postfix expression, printing each step."""
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
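# Worked example (illustrative, not from the original source):
# "2 3 1 * + 9 -" means 2 + (3 * 1) - 9 and evaluates to -4.
if __name__ == "__main__":
    assert solve("2 3 1 * + 9 -".split(" ")) == -4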
| 640
| 0
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f" {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results


if __name__ == "__main__":
    main()
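# Example invocation (illustrative, not from the original source; the CSV file names
# are made up for the example):
# python run_tf_text_classification.py \
#   --train_file train.csv --dev_file dev.csv --label_column_id 0 \
#   --model_name_or_path bert-base-uncased --output_dir ./out --do_train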
| 137
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to the image
        processor, given the configured shortest_edge size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 137
| 1
|
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
lowercase = 2_9_9_7_9_2_4_5_8
# Symbols
lowercase , lowercase , lowercase , lowercase = symbols("""ct x y z""")
def lowerCamelCase_ ( UpperCamelCase__ : float ):
'''simple docstring'''
if velocity > c:
raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError('''Speed must be greater than or equal to 1!''' )
return velocity / c
def lowerCamelCase_ ( UpperCamelCase__ : float ):
'''simple docstring'''
return 1 / sqrt(1 - beta(UpperCamelCase__ ) ** 2 )
def lowerCamelCase_ ( UpperCamelCase__ : float ):
'''simple docstring'''
return np.array(
[
[gamma(UpperCamelCase__ ), -gamma(UpperCamelCase__ ) * beta(UpperCamelCase__ ), 0, 0],
[-gamma(UpperCamelCase__ ) * beta(UpperCamelCase__ ), gamma(UpperCamelCase__ ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def lowerCamelCase_ ( UpperCamelCase__ : float, UpperCamelCase__ : np.ndarray | None = None ):
'''simple docstring'''
if event is None:
UpperCamelCase__ = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(UpperCamelCase__ ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
lowercase = transform(2_9_9_7_9_2_4_5)
print("""Example of four vector: """)
print(f'ct\' = {four_vector[0]}')
print(f'x\' = {four_vector[1]}')
print(f'y\' = {four_vector[2]}')
print(f'z\' = {four_vector[3]}')
# Substitute symbols with numerical values
lowercase = {ct: c, x: 1, y: 1, z: 1}
lowercase = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'\n{numerical_vector}')
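    # Numeric check (illustrative, not from the original source): at v = c/2,
    # beta = 0.5 and gamma = 1 / sqrt(1 - 0.25) ≈ 1.1547.
    print(f"\ngamma(c / 2) = {gamma(c / 2)}")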
| 240
|
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks if a matrix equals its own conjugate transpose (is Hermitian)."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient (v* A v) / (v* v) of a Hermitian matrix A."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
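# Example (illustrative, not from the original source): the Rayleigh quotient of an
# eigenvector recovers the corresponding eigenvalue exactly.
if __name__ == "__main__":
    demo_matrix = np.diag([1.0, 2.0, 5.0])
    demo_vector = np.array([[0.0], [0.0], [1.0]])
    assert rayleigh_quotient(demo_matrix, demo_vector) == 5.0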
| 240
| 1
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 302
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict


def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    # define GLPN configuration
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
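# Example (illustrative, not from the original source) of what rename_keys does to a
# single checkpoint entry:
# rename_keys(OrderedDict({"module.encoder.block1.0.attn.q.weight": None}))
# -> OrderedDict({"glpn.encoder.block.0.0.attention.self.query.weight": None})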
| 302
| 1
|
'''simple docstring'''
from collections.abc import Callable
class Heap:
    """
    A generic Heap class, which can act as a min- or max-heap depending on the
    key function passed in.
    """

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key: Callable = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if it exists, else None"""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of given index if it exists, else None"""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of given index if it exists, else None"""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs changes required for swapping two elements in the heap"""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using default comparison"""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """
        Returns index of valid parent as per desired ordering among given index and
        both its children
        """
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction from the given index"""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction from the given index"""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates the value of the given item in the heap, if present"""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes the given item from the heap, if present"""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts the given item with the given value into the heap"""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        """Returns the top item pair (item, calculated value) from the heap, if present"""
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        """
        Returns the top item pair (item, calculated value) from the heap and removes
        it as well, if present
        """
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
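# Usage example (illustrative, not from the original source): larger values rise to
# the top by default; pass key=lambda x: -x to Heap() for min-heap behaviour instead.
if __name__ == "__main__":
    h = Heap()
    h.insert_item(5, 34)
    h.insert_item(6, 31)
    h.insert_item(7, 37)
    print(h.get_top())      # [7, 37] - largest value on top
    print(h.extract_top())  # [7, 37] is removed...
    print(h.get_top())      # ...leaving [5, 34]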
| 42
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of random PIL images"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 505
| 0
|
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
_DESCRIPTION = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
_KWARGS_DESCRIPTION = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
_WARNING = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
_LICENSE = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
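# Quick numeric check (illustrative, not from the original source): with n = 20
# samples of which c = 3 pass, the unbiased pass@1 estimate reduces to c/n = 0.15.
if __name__ == "__main__":
    print(estimate_pass_at_k(np.array([20]), np.array([3]), 1))  # [0.15]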
| 236
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = '▁'
_lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model'}
_lowerCAmelCase = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
}
}
_lowerCAmelCase = {
'facebook/mbart-large-en-ro': 1_0_2_4,
'facebook/mbart-large-cc25': 1_0_2_4,
}
# fmt: off
_lowerCAmelCase = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class MBartTokenizer(PreTrainedTokenizer):
    """MBART tokenizer based on SentencePiece, with fairseq-style language-code special tokens."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", tokenizer_file=None, src_lang=None, tgt_lang=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, additional_special_tokens=None, **kwargs):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)}
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend([t for t in additional_special_tokens if t not in self._additional_special_tokens])

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        """Retrieve a mask marking the special tokens added by `build_inputs_with_special_tokens`."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """An MBART sequence has the format `X [eos, src_lang_code]`; no BOS token is used."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """mBART does not use token type ids, so a list of zeros is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for `generate`."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of sub-word tokens to a single string."""
        return "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
def UpperCAmelCase ( self , __magic_name__ , __magic_name__ = "en_XX" , __magic_name__ = None , __magic_name__ = "ro_RO" , **__magic_name__ , ):
"""simple docstring"""
A_ : List[Any] = src_lang
A_ : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(__magic_name__ , __magic_name__ , **__magic_name__ )
def UpperCAmelCase ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCAmelCase ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCAmelCase ( self , __magic_name__ ):
"""simple docstring"""
A_ : int = self.lang_code_to_id[src_lang]
A_ : int = []
A_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
def UpperCAmelCase ( self , __magic_name__ ):
"""simple docstring"""
A_ : Union[str, Any] = self.lang_code_to_id[lang]
A_ : Any = []
A_ : Optional[int] = [self.eos_token_id, self.cur_lang_code]
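
# Illustrative usage (a sketch; assumes the checkpoint is reachable on the
# Hugging Face Hub and that this module is imported from within `transformers`):
# tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
# batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# Source input ids end with the suffix [eos_token_id, src_lang_code_id].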
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish national ID (DNI): 8 digits followed by a check letter."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    # The check letter is determined by the 8-digit number modulo 23
    return letter == LOOKUP_LETTERS[number % 23]
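
# Illustrative check: 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so
# "12345678Z" is a structurally valid DNI; hyphens and case are normalized.
assert is_spain_national_id("12345678Z")
assert is_spain_national_id("12345678-z")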
if __name__ == "__main__":
import doctest
doctest.testmod()
"""Nagel-Schreckenberg cellular-automaton model of single-lane traffic flow."""
from random import randint, random


def construct_highway(number_of_cells: int, frequency: int, initial_speed: int, random_frequency: bool = False, random_speed: bool = False, max_speed: int = 5) -> list:
    """Build the highway following the requested parameters."""
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = randint(0, max_speed) if random_speed else initial_speed  # Place the cars
        i += randint(1, max_speed * 2) if random_frequency else frequency  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    """Count the empty cells between the car at `car_index` and the next car."""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway: wrap around to the start
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    """Apply one Nagel-Schreckenberg step to the speeds of all cars."""
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    """Run the simulation, appending one new highway state per update."""
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
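
# Illustrative run (parameters are arbitrary): a 30-cell circular road with a
# car every 4 cells at initial speed 1, a 10% random-slowdown chance, 2 updates.
# highway = construct_highway(30, 4, 1)
# print(simulate(highway, 2, 0.1, 5))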
if __name__ == "__main__":
import doctest
doctest.testmod()
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    """Processor wrapping a text tokenizer and optional Bark voice presets (speaker embeddings)."""

    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path, speaker_embeddings_dict_path, subfolder=kwargs.pop("subfolder", None), cache_dir=kwargs.pop("cache_dir", None), force_download=kwargs.pop("force_download", False), proxies=kwargs.pop("proxies", None), resume_download=kwargs.pop("resume_download", False), local_files_only=kwargs.pop("local_files_only", False), use_auth_token=kwargs.pop("use_auth_token", None), revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)
        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(self, save_directory, speaker_embeddings_dict_path="speaker_embeddings_path.json", speaker_embeddings_directory="speaker_embeddings", push_to_hub: bool = False, **kwargs):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(os.path.join(embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"), voice_preset[key], allow_pickle=False)
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].")

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key], subfolder=kwargs.pop("subfolder", None), cache_dir=kwargs.pop("cache_dir", None), force_download=kwargs.pop("force_download", False), proxies=kwargs.pop("proxies", None), resume_download=kwargs.pop("resume_download", False), local_files_only=kwargs.pop("local_files_only", False), use_auth_token=kwargs.pop("use_auth_token", None), revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}` does not exist, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.")

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None, **kwargs):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

    def __call__(self, text=None, voice_preset=None, return_tensors="pt", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, **kwargs):
        """Tokenize `text` and, if given, attach `voice_preset` as a "history_prompt" BatchFeature."""
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(text, return_tensors=return_tensors, padding="max_length", max_length=max_length, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, add_special_tokens=add_special_tokens, **kwargs)

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
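
# Illustrative usage (a sketch; assumes the "suno/bark-small" checkpoint and
# its bundled voice presets are reachable on the Hugging Face Hub):
# processor = BarkProcessor.from_pretrained("suno/bark-small")
# inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
# `inputs` then holds the tokenized text plus a "history_prompt" BatchFeature.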
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
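
# Modular division a/b (mod p) relies on Fermat's little theorem: for a prime p
# and b not divisible by p, b**(p - 2) is the modular inverse of b, so
# (a / b) % p == (a * b**(p - 2)) % p. The checks below verify both routes.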
# a prime number
p = 701

a = 1_000_000_000
b = 10

# using the binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

# using Python's ** operator on big integers:
print((a / b) % p == (a * b ** (p - 2)) % p)
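
# Python's built-in three-argument pow computes the same modular power without
# the recursion (an equivalent, idiomatic alternative):
print((a / b) % p == (a * pow(b, p - 2, p)) % p)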