| code (string, lengths 81–54k) | code_codestyle (int64, 0–721) | style_context (string, lengths 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
from math import sqrt


def is_prime(number: int) -> bool:
    """Check primality in O(sqrt(n)) using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    count = 0
    number = 1
    # Handle 2, the only even prime, separately...
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    # ...then test odd candidates only.
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(f"{solution() = }")
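A quick, hedged sanity check for the two functions above (an illustrative sketch, not part of the original solution):

def _naive_is_prime(n: int) -> bool:
    # Brute-force oracle: trial division by every smaller integer.
    return n > 1 and all(n % d != 0 for d in range(2, n))

assert all(is_prime(n) == _naive_is_prime(n) for n in range(-5, 500))
assert solution(6) == 13  # the first six primes are 2, 3, 5, 7, 11, 13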
| 45 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
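A minimal usage sketch for the classes above (assumes the `OnnxConfig(config, task=...)` constructor from the transformers ONNX API; names as defined in this file):

config = DeiTConfig(image_size=384)      # override one default
onnx_config = DeiTOnnxConfig(config)
print(dict(onnx_config.inputs))          # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}
print(onnx_config.atol_for_validation)   # 0.0001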
| 45 | 1 |
import logging
import os
from logging import (
    CRITICAL,  # NOQA
    DEBUG,  # NOQA
    ERROR,  # NOQA
    FATAL,  # NOQA
    INFO,  # NOQA
    NOTSET,  # NOQA
    WARN,  # NOQA
    WARNING,  # NOQA
)
from typing import Optional

from tqdm import auto as tqdm_lib

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level() -> int:
    """Return the level from the DATASETS_VERBOSITY env var if it is a valid choice, else the default level."""
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, defaulting to the library root logger."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the effective level of the library root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the level of the library root logger."""
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()


class EmptyTqdm:
    """Dummy tqdm that does nothing, returned when progress bars are disabled."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any attribute access."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
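A short usage sketch for the helpers above (assuming this module is importable as `datasets.utils.logging`, where this code originates):

from datasets.utils import logging as ds_logging

ds_logging.set_verbosity_info()                    # root library logger now at INFO
ds_logging.get_logger("datasets.example").info("visible at INFO level")
ds_logging.disable_progress_bar()                  # the tqdm wrapper now returns EmptyTqdm
assert not ds_logging.is_progress_bar_enabled()
ds_logging.enable_progress_bar()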
| 45 |
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed scheduled (daily) CI workflow run."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NB: "worflow_run_id" is the keyword name as defined in get_ci_error_statistics
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the requested artifacts and return their file contents as text."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
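A hedged usage sketch (the `get_ci_error_statistics` helpers, a GitHub token with actions scope, and the artifact names below are all assumptions for illustration):

import os

token = os.environ.get("GITHUB_TOKEN")  # assumed to be set in the environment
reports = get_last_daily_ci_reports(
    artifact_names=["run_models_gpu", "run_examples_gpu"],  # hypothetical artifact names
    output_dir="ci_reports",
    token=token,
)
for name, files in reports.items():
    print(name, list(files)[:3])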
| 45 | 1 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
lowercase_ = Mapping[str, np.ndarray]
lowercase_ = Mapping[str, Any] # Is a nested dict.
lowercase_ = 0.01
@dataclasses.dataclass(frozen=UpperCAmelCase )
class SCREAMING_SNAKE_CASE :
_UpperCamelCase : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
_UpperCamelCase : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
_UpperCamelCase : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
_UpperCamelCase : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
_UpperCamelCase : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
_UpperCamelCase : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
_UpperCamelCase : Optional[str] = None
# Templates used to generate this protein (prediction-only)
_UpperCamelCase : Optional[Sequence[str]] = None
# Chain corresponding to each parent
_UpperCamelCase : Optional[Sequence[int]] = None
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Protein:
lowercase__ = R'(\[[A-Z]+\]\n)'
lowercase__ = [tag.strip() for tag in re.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0]
lowercase__ = zip(tags[0::2] , [l.split('\n' ) for l in tags[1::2]] )
lowercase__ = ["N", "CA", "C"]
lowercase__ = None
lowercase__ = None
lowercase__ = None
for g in groups:
if "[PRIMARY]" == g[0]:
lowercase__ = g[1][0].strip()
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if seq[i] not in residue_constants.restypes:
lowercase__ = 'X' # FIXME: strings are immutable
lowercase__ = np.array(
[residue_constants.restype_order.get(_SCREAMING_SNAKE_CASE , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
lowercase__ = []
for axis in range(3 ):
tertiary.append(list(map(_SCREAMING_SNAKE_CASE , g[1][axis].split() ) ) )
lowercase__ = np.array(_SCREAMING_SNAKE_CASE )
lowercase__ = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(_SCREAMING_SNAKE_CASE ):
lowercase__ = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
lowercase__ = np.array(list(map({'-': 0, '+': 1}.get , g[1][0].strip() ) ) )
lowercase__ = np.zeros(
(
len(_SCREAMING_SNAKE_CASE ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(_SCREAMING_SNAKE_CASE ):
lowercase__ = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=_SCREAMING_SNAKE_CASE , atom_mask=_SCREAMING_SNAKE_CASE , aatype=_SCREAMING_SNAKE_CASE , residue_index=np.arange(len(_SCREAMING_SNAKE_CASE ) ) , b_factors=_SCREAMING_SNAKE_CASE , )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 ) -> List[str]:
lowercase__ = []
lowercase__ = prot.remark
if remark is not None:
pdb_headers.append(F"""REMARK {remark}""" )
lowercase__ = prot.parents
lowercase__ = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
lowercase__ = [p for i, p in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if i == chain_id]
if parents is None or len(_SCREAMING_SNAKE_CASE ) == 0:
lowercase__ = ['N/A']
pdb_headers.append(F"""PARENT {" ".join(_SCREAMING_SNAKE_CASE )}""" )
return pdb_headers
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
lowercase__ = []
lowercase__ = pdb_str.split('\n' )
lowercase__ = prot.remark
if remark is not None:
out_pdb_lines.append(F"""REMARK {remark}""" )
lowercase__ = 42
if prot.parents is not None and len(prot.parents ) > 0:
lowercase__ = []
if prot.parents_chain_index is not None:
lowercase__ = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(_SCREAMING_SNAKE_CASE ) , [] )
parent_dict[str(_SCREAMING_SNAKE_CASE )].append(_SCREAMING_SNAKE_CASE )
lowercase__ = max([int(_SCREAMING_SNAKE_CASE ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
lowercase__ = parent_dict.get(str(_SCREAMING_SNAKE_CASE ) , ['N/A'] )
parents_per_chain.append(_SCREAMING_SNAKE_CASE )
else:
parents_per_chain.append(list(prot.parents ) )
else:
lowercase__ = [['N/A']]
def make_parent_line(_SCREAMING_SNAKE_CASE ) -> str:
return F"""PARENT {" ".join(_SCREAMING_SNAKE_CASE )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
lowercase__ = 0
for i, l in enumerate(_SCREAMING_SNAKE_CASE ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(_SCREAMING_SNAKE_CASE )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(_SCREAMING_SNAKE_CASE ):
lowercase__ = parents_per_chain[chain_counter]
else:
lowercase__ = ['N/A']
out_pdb_lines.append(make_parent_line(_SCREAMING_SNAKE_CASE ) )
return "\n".join(_SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
lowercase__ = residue_constants.restypes + ['X']
def res_atoa(_SCREAMING_SNAKE_CASE ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , 'UNK' )
lowercase__ = residue_constants.atom_types
lowercase__ = []
lowercase__ = prot.atom_mask
lowercase__ = prot.aatype
lowercase__ = prot.atom_positions
lowercase__ = prot.residue_index.astype(np.intaa )
lowercase__ = prot.b_factors
lowercase__ = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('Invalid aatypes.' )
lowercase__ = get_pdb_headers(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
pdb_lines.extend(_SCREAMING_SNAKE_CASE )
lowercase__ = aatype.shape[0]
lowercase__ = 1
lowercase__ = 0
lowercase__ = string.ascii_uppercase
lowercase__ = None
# Add all atom sites.
for i in range(_SCREAMING_SNAKE_CASE ):
lowercase__ = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(_SCREAMING_SNAKE_CASE , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
lowercase__ = 'ATOM'
lowercase__ = atom_name if len(_SCREAMING_SNAKE_CASE ) == 4 else F""" {atom_name}"""
lowercase__ = ''
lowercase__ = ''
lowercase__ = 1.0_0
lowercase__ = atom_name[0] # Protein supports only C, N, O, S, this works.
lowercase__ = ''
lowercase__ = 'A'
if chain_index is not None:
lowercase__ = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
lowercase__ = (
F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
F"""{res_name_a:>3} {chain_tag:>1}"""
F"""{residue_index[i]:>4}{insertion_code:>1} """
F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
F"""{occupancy:>6.2f}{b_factor:>6.2f} """
F"""{element:>2}{charge:>2}"""
)
pdb_lines.append(_SCREAMING_SNAKE_CASE )
atom_index += 1
lowercase__ = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
lowercase__ = True
lowercase__ = chain_index[i + 1]
if should_terminate:
# Close the chain.
lowercase__ = 'TER'
lowercase__ = (
F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(_SCREAMING_SNAKE_CASE )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
pdb_lines.append('END' )
pdb_lines.append('' )
return "\n".join(_SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> np.ndarray:
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , ) -> Protein:
return Protein(
aatype=features['aatype'] , atom_positions=result['final_atom_positions'] , atom_mask=result['final_atom_mask'] , residue_index=features['residue_index'] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask'] ) , chain_index=_SCREAMING_SNAKE_CASE , remark=_SCREAMING_SNAKE_CASE , parents=_SCREAMING_SNAKE_CASE , parents_chain_index=_SCREAMING_SNAKE_CASE , )
| 45 |
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 45 | 1 |
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 45 |
def is_arithmetic_series(series: list) -> bool:
    """Return True if the given list is an arithmetic series (constant difference)."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the given list."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
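A few worked examples for the helpers above (an illustrative sketch):

assert is_arithmetic_series([2, 4, 6]) is True
assert is_arithmetic_series([3, 6, 12, 24]) is False   # differences 3, 6, 12 are not constant
assert arithmetic_mean([2, 4, 6]) == 4.0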
| 45 | 1 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
lowercase_ = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
lowercase_ = {"""facebook/blenderbot_small-90M""": 512}
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
lowercase__ = set()
lowercase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase__ = char
lowercase__ = set(_SCREAMING_SNAKE_CASE )
return pairs
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Optional[int] = VOCAB_FILES_NAMES
_UpperCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : str = ['input_ids', 'attention_mask']
def __init__( self : Any , a : Optional[Any] , a : str , a : Union[str, Any]="__start__" , a : List[Any]="__end__" , a : Optional[Any]="__unk__" , a : Any="__null__" , **a : int , )-> Any:
"""simple docstring"""
super().__init__(unk_token=a , bos_token=a , eos_token=a , pad_token=a , **a )
with open(a , encoding='utf-8' ) as vocab_handle:
lowercase__ = json.load(a )
lowercase__ = {v: k for k, v in self.encoder.items()}
with open(a , encoding='utf-8' ) as merges_handle:
lowercase__ = merges_handle.read().split('\n' )[1:-1]
lowercase__ = [tuple(merge.split() ) for merge in merges]
lowercase__ = dict(zip(a , range(len(a ) ) ) )
lowercase__ = {}
@property
def SCREAMING_SNAKE_CASE_ ( self : str )-> int:
"""simple docstring"""
return len(self.encoder )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : str )-> str:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowercase__ = re.sub('([.,!?()])' , R' \1' , a )
lowercase__ = re.sub('(\')' , R' \1 ' , a )
lowercase__ = re.sub(R'\s{2,}' , ' ' , a )
if "\n" in token:
lowercase__ = token.replace('\n' , ' __newln__' )
lowercase__ = token.split(' ' )
lowercase__ = []
for token in tokens:
if not len(a ):
continue
lowercase__ = token.lower()
lowercase__ = tuple(a )
lowercase__ = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
lowercase__ = get_pairs(a )
if not pairs:
words.append(a )
continue
while True:
lowercase__ = min(a , key=lambda a : self.bpe_ranks.get(a , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase__ , lowercase__ = bigram
lowercase__ = []
lowercase__ = 0
while i < len(a ):
try:
lowercase__ = word.index(a , a )
new_word.extend(word[i:j] )
lowercase__ = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase__ = tuple(a )
lowercase__ = new_word
if len(a ) == 1:
break
else:
lowercase__ = get_pairs(a )
lowercase__ = '@@ '.join(a )
lowercase__ = word[:-4]
lowercase__ = word
words.append(a )
return " ".join(a )
def SCREAMING_SNAKE_CASE_ ( self : Any , a : str )-> List[str]:
"""simple docstring"""
lowercase__ = []
lowercase__ = re.findall(R'\S+\n?' , a )
for token in words:
split_tokens.extend(list(self.bpe(a ).split(' ' ) ) )
return split_tokens
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : str )-> int:
"""simple docstring"""
lowercase__ = token.lower()
return self.encoder.get(a , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE_ ( self : Any , a : int )-> str:
"""simple docstring"""
return self.decoder.get(a , self.unk_token )
def SCREAMING_SNAKE_CASE_ ( self : Any , a : List[str] )-> str:
"""simple docstring"""
lowercase__ = ' '.join(a ).replace('@@ ' , '' ).strip()
return out_string
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : str , a : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowercase__ = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(a , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=a , ensure_ascii=a ) + '\n' )
lowercase__ = 0
with open(a , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda a : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
lowercase__ = token_index
writer.write(' '.join(a ) + '\n' )
index += 1
return vocab_file, merge_file
| 45 |
from __future__ import annotations

import math
from collections.abc import Callable


def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximate the curve as a sequence of linear segments and sum their lengths
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
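A quick correctness check (a sketch, not part of the original script): for a straight line the polyline approximation is exact at any step count, so the result must equal the true arc length.

# For f(x) = x on [0, 1] the true arc length is sqrt(2), and every linear
# segment lies on the curve, so the approximation is exact.
approx = line_length(lambda x: x, 0, 1, steps=10)
assert abs(approx - math.sqrt(2)) < 1e-9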
| 45 | 1 |
import numpy as np
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
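The thresholding logic above reduces to: score each image embedding with a linear head, flag images whose score exceeds the threshold, and black them out. A standalone NumPy sketch of just that step (illustrative helper, not the library API):

import numpy as np

def _blackout_flagged(images, scores, threshold=0.5):
    # Mirror of the per-image loop: zero out any image whose score trips the threshold.
    flagged = (scores.flatten() > threshold).tolist()
    for idx, hit in enumerate(flagged):
        if hit:
            images[idx] = np.zeros(images[idx].shape)
    return images, flagged

imgs = [np.ones((8, 8, 3)), np.ones((8, 8, 3))]
imgs, flagged = _blackout_flagged(imgs, np.array([0.9, 0.1]))
assert flagged == [True, False] and imgs[0].sum() == 0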
| 45 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
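This pattern defers heavy imports (such as torch-backed modeling code) until first attribute access. A hedged illustration (assuming an installed `transformers` with this package at `transformers.models.squeezebert`):

from transformers.models import squeezebert  # cheap: submodules not imported yet

config_cls = squeezebert.SqueezeBertConfig   # first access triggers the real import via _LazyModule
print(config_cls.model_type)                 # "squeezebert"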
| 45 | 1 |
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
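A hedged sketch of how the dynamic ONNX axes change with the task (names as defined above; the `task` keyword is assumed from the `OnnxConfig` constructor):

config = RobertaPreLayerNormConfig()
print(dict(RobertaPreLayerNormOnnxConfig(config, task="default").inputs))
# {'input_ids': {0: 'batch', 1: 'sequence'}, 'attention_mask': {0: 'batch', 1: 'sequence'}}
print(dict(RobertaPreLayerNormOnnxConfig(config, task="multiple-choice").inputs))
# {'input_ids': {0: 'batch', 1: 'choice', 2: 'sequence'}, ...}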
| 45 |
def solution(length: int = 50) -> int:
    """Project Euler 116: count the ways coloured tiles of length 2, 3 or 4
    (one colour per tiling, at least one tile) can replace black squares in a
    row of the given length."""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
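A hedged cross-check via the standard single-colour recurrence (a sketch, not part of the original solution): for one tile length t, ways(n) = ways(n-1) + ways(n-t) counts fillings that may use zero tiles, so subtract 1 for "at least one tile".

def _single_colour_ways(n: int, t: int) -> int:
    # ways[k] = fillings of a length-k row with black squares and tiles of length t
    ways = [1] * t + [0] * max(0, n - t + 1)
    for k in range(t, n + 1):
        ways[k] = ways[k - 1] + ways[k - t]
    return ways[n] - 1

assert solution(5) == sum(_single_colour_ways(5, t) for t in (2, 3, 4))  # 7 + 3 + 2 = 12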
| 45 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class SCREAMING_SNAKE_CASE :
def __init__( self : List[str] , a : Dict , a : List[Any]=13 , a : List[str]=7 , a : Optional[int]=True , a : str=True , a : Optional[int]=True , a : str=True , a : str=99 , a : str=32 , a : List[str]=2 , a : List[str]=4 , a : Union[str, Any]=37 , a : List[str]="gelu" , a : Dict=0.1 , a : Any=0.1 , a : Tuple=512 , a : Optional[int]=16 , a : int=2 , a : Any=0.02 , a : Any=3 , a : Dict=4 , a : List[str]=None , a : Tuple=1_000 , )-> Dict:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
lowercase__ = range_bbox
def SCREAMING_SNAKE_CASE_ ( self : int )-> Any:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
lowercase__ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowercase__ = bbox[i, j, 3]
lowercase__ = bbox[i, j, 1]
lowercase__ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowercase__ = bbox[i, j, 2]
lowercase__ = bbox[i, j, 0]
lowercase__ = t
lowercase__ = tf.convert_to_tensor(a )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self : str , a : List[Any] , a : List[str] , a : Union[str, Any] , a : Union[str, Any] , a : int , a : Dict , a : int , a : Any )-> List[str]:
"""simple docstring"""
lowercase__ = TFLayoutLMModel(config=a )
lowercase__ = model(a , a , attention_mask=a , token_type_ids=a )
lowercase__ = model(a , a , token_type_ids=a )
lowercase__ = model(a , a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : str , a : Any , a : Union[str, Any] , a : Tuple , a : Dict , a : Dict , a : Dict , a : Tuple , a : List[str] )-> int:
"""simple docstring"""
lowercase__ = TFLayoutLMForMaskedLM(config=a )
lowercase__ = model(a , a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : List[str] , a : List[str] , a : Union[str, Any] , a : List[str] , a : List[Any] , a : Dict , a : Any , a : Optional[int] )-> Tuple:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFLayoutLMForSequenceClassification(config=a )
lowercase__ = model(a , a , attention_mask=a , token_type_ids=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Union[str, Any] , a : Tuple , a : Optional[Any] , a : Optional[int] , a : List[Any] , a : List[str] , a : Optional[int] , a : List[Any] )-> List[str]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFLayoutLMForTokenClassification(config=a )
lowercase__ = model(a , a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : List[Any] , a : Optional[Any] , a : int , a : int , a : Union[str, Any] , a : Any , a : Optional[int] , a : Optional[Any] )-> List[str]:
"""simple docstring"""
lowercase__ = TFLayoutLMForQuestionAnswering(config=a )
lowercase__ = model(a , a , attention_mask=a , token_type_ids=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Any:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) = config_and_inputs
lowercase__ = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : List[Any] = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
_UpperCamelCase : List[str] = (
{
'feature-extraction': TFLayoutLMModel,
'fill-mask': TFLayoutLMForMaskedLM,
'text-classification': TFLayoutLMForSequenceClassification,
'token-classification': TFLayoutLMForTokenClassification,
'zero-shot': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCamelCase : List[str] = False
_UpperCamelCase : Optional[int] = True
_UpperCamelCase : Optional[int] = 10
def SCREAMING_SNAKE_CASE_ ( self : str )-> int:
"""simple docstring"""
lowercase__ = TFLayoutLMModelTester(self )
lowercase__ = ConfigTester(self , config_class=a , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> List[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def SCREAMING_SNAKE_CASE_ ( self : str )-> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a )
def SCREAMING_SNAKE_CASE_ ( self : int )-> List[str]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Tuple:
"""simple docstring"""
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = TFLayoutLMModel.from_pretrained(a )
self.assertIsNotNone(a )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> str:
"""simple docstring"""
pass
def __UpperCamelCase () -> Tuple:
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
lowercase__ = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231
lowercase__ = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
lowercase__ = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
lowercase__ = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
lowercase__ = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any:
"""simple docstring"""
lowercase__ = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = prepare_layoutlm_batch_inputs()
# forward pass
lowercase__ = model(input_ids=a , bbox=a , attention_mask=a , token_type_ids=a )
# test the sequence output on [0, :3, :3]
lowercase__ = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1E-3 ) )
# test the pooled output on [1, :3]
lowercase__ = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , a , atol=1E-3 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[str]:
"""simple docstring"""
lowercase__ = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = prepare_layoutlm_batch_inputs()
# forward pass
lowercase__ = model(
input_ids=a , bbox=a , attention_mask=a , token_type_ids=a , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
lowercase__ = outputs.loss
lowercase__ = (2,)
self.assertEqual(loss.shape , a )
# test the shape of the logits
lowercase__ = outputs.logits
lowercase__ = (2, 2)
self.assertEqual(logits.shape , a )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
lowercase__ = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 )
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = prepare_layoutlm_batch_inputs()
# forward pass
lowercase__ = model(
input_ids=a , bbox=a , attention_mask=a , token_type_ids=a , labels=a )
# test the shape of the logits
lowercase__ = outputs.logits
lowercase__ = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , a )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
lowercase__ = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = prepare_layoutlm_batch_inputs()
# forward pass
lowercase__ = model(input_ids=a , bbox=a , attention_mask=a , token_type_ids=a )
# test the shape of the logits
lowercase__ = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , a )
self.assertEqual(outputs.end_logits.shape , a )
| 45 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
def __init__( self : Optional[Any] , a : UNetaDModel , a : UNetaDModel , a : DDPMScheduler , a : Any , )-> Dict:
"""simple docstring"""
super().__init__()
lowercase__ = value_function
lowercase__ = unet
lowercase__ = scheduler
lowercase__ = env
lowercase__ = env.get_dataset()
lowercase__ = {}
for key in self.data.keys():
try:
lowercase__ = self.data[key].mean()
except: # noqa: E722
pass
lowercase__ = {}
for key in self.data.keys():
try:
lowercase__ = self.data[key].std()
except: # noqa: E722
pass
lowercase__ = env.observation_space.shape[0]
lowercase__ = env.action_space.shape[0]
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Any , a : int )-> Dict:
"""simple docstring"""
return (x_in - self.means[key]) / self.stds[key]
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str , a : List[str] )-> str:
"""simple docstring"""
return x_in * self.stds[key] + self.means[key]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : Tuple )-> Tuple:
"""simple docstring"""
if type(a ) is dict:
return {k: self.to_torch(a ) for k, v in x_in.items()}
elif torch.is_tensor(a ):
return x_in.to(self.unet.device )
return torch.tensor(a , device=self.unet.device )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Optional[int] , a : Dict , a : Optional[Any] )-> List[Any]:
"""simple docstring"""
for key, val in cond.items():
lowercase__ = val.clone()
return x_in
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[Any] , a : Any , a : Optional[Any] , a : Optional[int] )-> List[Any]:
"""simple docstring"""
lowercase__ = x.shape[0]
lowercase__ = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowercase__ = torch.full((batch_size,) , a , device=self.unet.device , dtype=torch.long )
for _ in range(a ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowercase__ = self.value_function(x.permute(0 , 2 , 1 ) , a ).sample
lowercase__ = torch.autograd.grad([y.sum()] , [x] )[0]
lowercase__ = self.scheduler._get_variance(a )
lowercase__ = torch.exp(0.5 * posterior_variance )
lowercase__ = model_std * grad
lowercase__ = 0
lowercase__ = x.detach()
lowercase__ = x + scale * grad
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.unet(x.permute(0 , 2 , 1 ) , a ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
lowercase__ = self.scheduler.step(a , a , a , predict_epsilon=a )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.to_torch(a )
return x, y
def __call__( self : Any , a : Tuple , a : int=64 , a : Tuple=32 , a : List[Any]=2 , a : List[str]=0.1 )-> List[Any]:
"""simple docstring"""
lowercase__ = self.normalize(a , 'observations' )
lowercase__ = obs[None].repeat(a , axis=0 )
lowercase__ = {0: self.to_torch(a )}
lowercase__ = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowercase__ = randn_tensor(a , device=self.unet.device )
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.to_torch(a )
# run the diffusion process
lowercase__ , lowercase__ = self.run_diffusion(a , a , a , a )
# sort output trajectories by value
lowercase__ = y.argsort(0 , descending=a ).squeeze()
lowercase__ = x[sorted_idx]
lowercase__ = sorted_values[:, :, : self.action_dim]
lowercase__ = actions.detach().cpu().numpy()
lowercase__ = self.de_normalize(a , key='actions' )
# select the action with the highest value
if y is not None:
lowercase__ = 0
else:
# if we didn't run value guiding, select a random action
lowercase__ = np.random.randint(0 , a )
lowercase__ = denorm_actions[selected_index, 0]
return denorm_actions
| 45 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def __init__( self : Any , a : Any , a : Dict=7 , a : Optional[int]=3 , a : Dict=30 , a : List[str]=400 , a : Union[str, Any]=True , a : List[Any]=None , a : int=True , a : Any=[0.5, 0.5, 0.5] , a : str=[0.5, 0.5, 0.5] , a : Dict=True , a : Optional[int]=1 / 255 , a : str=True , )-> List[str]:
"""simple docstring"""
lowercase__ = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_normalize
lowercase__ = image_mean
lowercase__ = image_std
lowercase__ = do_rescale
lowercase__ = rescale_factor
lowercase__ = do_pad
def SCREAMING_SNAKE_CASE_ ( self : str )-> List[str]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : int , a : Tuple=False )-> List[Any]:
"""simple docstring"""
if not batched:
lowercase__ = image_inputs[0]
if isinstance(a , Image.Image ):
lowercase__ , lowercase__ = image.size
else:
lowercase__ , lowercase__ = image.shape[1], image.shape[2]
if w < h:
lowercase__ = int(self.size['shortest_edge'] * h / w )
lowercase__ = self.size['shortest_edge']
elif w > h:
lowercase__ = self.size['shortest_edge']
lowercase__ = int(self.size['shortest_edge'] * w / h )
else:
lowercase__ = self.size['shortest_edge']
lowercase__ = self.size['shortest_edge']
else:
lowercase__ = []
for image in image_inputs:
lowercase__ , lowercase__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowercase__ = max(a , key=lambda a : item[0] )[0]
lowercase__ = max(a , key=lambda a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = DeformableDetrImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
"""simple docstring"""
lowercase__ = DeformableDetrImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Optional[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[Any]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'image_mean' ) )
self.assertTrue(hasattr(a , 'image_std' ) )
self.assertTrue(hasattr(a , 'do_normalize' ) )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'do_rescale' ) )
self.assertTrue(hasattr(a , 'do_pad' ) )
self.assertTrue(hasattr(a , 'size' ) )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> int:
"""simple docstring"""
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1_333} )
self.assertEqual(image_processor.do_pad , a )
lowercase__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=a )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , a )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[str]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__ = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ , lowercase__ = self.image_processor_tester.get_expected_values(a , batched=a )
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__ = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__ = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[str]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__ = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__ = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Dict:
"""simple docstring"""
lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
lowercase__ = json.loads(f.read() )
lowercase__ = {'image_id': 39_769, 'annotations': target}
# encode them
lowercase__ = DeformableDetrImageProcessor()
lowercase__ = image_processing(images=a , annotations=a , return_tensors='pt' )
# verify pixel values
lowercase__ = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape , a )
lowercase__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , a , atol=1E-4 ) )
# verify area
lowercase__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , a ) )
# verify boxes
lowercase__ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , a )
lowercase__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , a , atol=1E-3 ) )
# verify image_id
lowercase__ = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , a ) )
# verify is_crowd
lowercase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , a ) )
# verify class_labels
lowercase__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , a ) )
# verify orig_size
lowercase__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , a ) )
# verify size
lowercase__ = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , a ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
lowercase__ = json.loads(f.read() )
lowercase__ = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target}
lowercase__ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
lowercase__ = DeformableDetrImageProcessor(format='coco_panoptic' )
lowercase__ = image_processing(images=a , annotations=a , masks_path=a , return_tensors='pt' )
# verify pixel values
lowercase__ = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape , a )
lowercase__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , a , atol=1E-4 ) )
# verify area
lowercase__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , a ) )
# verify boxes
lowercase__ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , a )
lowercase__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , a , atol=1E-3 ) )
# verify image_id
lowercase__ = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , a ) )
# verify is_crowd
lowercase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , a ) )
# verify class_labels
lowercase__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , a ) )
# verify masks
lowercase__ = 822_873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , a )
# verify orig_size
lowercase__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , a ) )
# verify size
lowercase__ = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , a ) )
| 45
|
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    # Fundamental transformation applied to every channel value.
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
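# --- hedged example ------------------------------------------------------------
# A quick check of the `128 + level + (c - 128)` mapping on raw channel values,
# with no image file required (PIL clips the mapped values to 0..255 when
# Image.point applies them to an 8-bit image).
if __name__ == "__main__":
    for level in (-100, 0, 100):
        mapped = [128 + level + (c - 128) for c in (0, 128, 255)]
        print(f"level={level}: channels 0/128/255 ->", mapped)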
| 45
| 1
|
import random
import sys

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
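# --- hedged example ------------------------------------------------------------
# A non-interactive smoke test of the functions above: seed a small board and
# advance it one generation, without opening a matplotlib window. (When run as
# a script this executes only after the interactive loop above is interrupted.)
if __name__ == "__main__":
    demo = create_canvas(5)
    seed(demo)
    next_gen = run(demo)
    print("alive cells after one generation:", sum(cell for row in next_gen for cell in row))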
| 45
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def __init__( self : Any , a : str , a : List[Any]=7 , a : int=3 , a : int=18 , a : Optional[Any]=30 , a : Optional[int]=400 , a : int=True , a : Tuple=None , a : Optional[Any]=True , a : str=False , a : str=True , a : int=True , a : Tuple=[0.5, 0.5, 0.5] , a : Any=[0.5, 0.5, 0.5] , )-> Optional[int]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size if size is not None else {'height': 18, 'width': 20}
lowercase__ = do_thumbnail
lowercase__ = do_align_axis
lowercase__ = do_pad
lowercase__ = do_normalize
lowercase__ = image_mean
lowercase__ = image_std
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = DonutImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : int )-> List[Any]:
"""simple docstring"""
lowercase__ = DonutImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Any )-> int:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_thumbnail' ) )
self.assertTrue(hasattr(a , 'do_align_long_axis' ) )
self.assertTrue(hasattr(a , 'do_pad' ) )
self.assertTrue(hasattr(a , 'do_normalize' ) )
self.assertTrue(hasattr(a , 'image_mean' ) )
self.assertTrue(hasattr(a , 'image_std' ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
"""simple docstring"""
pass
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 45
| 1
|
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
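# --- hedged example ------------------------------------------------------------
# Round-trip sanity check of the two functions above against the standard
# library's base64 module.
if __name__ == "__main__":
    import base64 as std_base64

    sample = b"The quick brown fox"
    assert base64_encode(sample) == std_base64.b64encode(sample)
    assert base64_decode(base64_encode(sample)) == sample
    print(base64_encode(sample))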
| 45
|
import math
def res(x: int, y: int) -> float:
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
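# --- hedged example ------------------------------------------------------------
# Non-interactive check of the log trick above: res(2, 10) = 10*log10(2) ≈ 3.01
# and res(10, 2) = 2*log10(10) = 2, so 2^10 > 10^2 without computing either power.
assert res(2, 10) > res(10, 2)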
| 45
| 1
|
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
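# --- hedged example ------------------------------------------------------------
# The union / intersection above expressed directly in NumPy: with a shared
# universe X, fuzzy_or / fuzzy_and reduce to element-wise max / min over the
# membership arrays, so skfuzzy is not strictly needed for this part.
if __name__ == "__main__":
    assert np.allclose(np.maximum(young, middle_aged), union)
    assert np.allclose(np.minimum(young, middle_aged), intersection)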
| 45
|
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # row and column offsets of the 8 neighbours of a given cell
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
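# --- hedged example ------------------------------------------------------------
# Counting islands (8-directional connectivity) on a small grid with the class
# above; the classic example below contains five islands.
if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    print("Number of islands:", Graph(5, 5, grid).count_islands())  # 5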
| 45
| 1
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
lowercase_ = logging.getLogger(__name__)
lowercase_ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
lowercase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SCREAMING_SNAKE_CASE :
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={
'help': (
'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
)
} , )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(UpperCAmelCase )} , )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
_UpperCamelCase : bool = field(
default=UpperCAmelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
_UpperCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_UpperCamelCase : bool = field(
default=UpperCAmelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path' )
@dataclass
class SCREAMING_SNAKE_CASE :
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
_UpperCamelCase : Optional[str] = field(default=UpperCAmelCase , metadata={'help': 'The input training data file (a text file).'} )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'} , )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'} , )
_UpperCamelCase : bool = field(
default=UpperCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
_UpperCamelCase : Optional[int] = field(
default=5 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
_UpperCamelCase : Optional[int] = field(
default=UpperCAmelCase , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated. Default to the max input length of the model.'
)
} , )
_UpperCamelCase : Optional[int] = field(
default=UpperCAmelCase , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
_UpperCamelCase : float = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
_UpperCamelCase : bool = field(
default=UpperCAmelCase , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
def SCREAMING_SNAKE_CASE_ ( self : int )-> int:
"""simple docstring"""
if self.train_file is not None:
lowercase__ = self.train_file.split('.' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
lowercase__ = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
with open(_SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as f:
lowercase__ = [json.loads(_SCREAMING_SNAKE_CASE ) for line in f.read().splitlines() if (len(_SCREAMING_SNAKE_CASE ) > 0 and not line.isspace())]
assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE )
lowercase__ = {c: dataset[c] for c in dataset.column_names}
lowercase__ = refs
return Dataset.from_dict(_SCREAMING_SNAKE_CASE )
def __UpperCamelCase () -> List[str]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
lowercase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _SCREAMING_SNAKE_CASE )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowercase__ = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
lowercase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
lowercase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
else:
lowercase__ = {}
if data_args.train_file is not None:
lowercase__ = data_args.train_file
if data_args.validation_file is not None:
lowercase__ = data_args.validation_file
lowercase__ = data_args.train_file.split('.' )[-1]
if extension == "txt":
lowercase__ = 'text'
lowercase__ = load_dataset(_SCREAMING_SNAKE_CASE , data_files=_SCREAMING_SNAKE_CASE )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowercase__ = AutoConfig.from_pretrained(model_args.config_name , **_SCREAMING_SNAKE_CASE )
elif model_args.model_name_or_path:
lowercase__ = AutoConfig.from_pretrained(model_args.model_name_or_path , **_SCREAMING_SNAKE_CASE )
else:
lowercase__ = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
lowercase__ = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
lowercase__ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **_SCREAMING_SNAKE_CASE )
elif model_args.model_name_or_path:
lowercase__ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **_SCREAMING_SNAKE_CASE )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
lowercase__ = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
lowercase__ = AutoModelForMaskedLM.from_config(_SCREAMING_SNAKE_CASE )
model.resize_token_embeddings(len(_SCREAMING_SNAKE_CASE ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
lowercase__ = datasets['train'].column_names
else:
lowercase__ = datasets['validation'].column_names
lowercase__ = 'text' if 'text' in column_names else column_names[0]
lowercase__ = 'max_length' if data_args.pad_to_max_length else False
def tokenize_function(_SCREAMING_SNAKE_CASE ):
# Remove empty lines
lowercase__ = [line for line in examples['text'] if len(_SCREAMING_SNAKE_CASE ) > 0 and not line.isspace()]
return tokenizer(examples['text'] , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=data_args.max_seq_length )
lowercase__ = datasets.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
lowercase__ = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
lowercase__ = add_chinese_references(
tokenized_datasets['validation'] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
lowercase__ = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
lowercase__ = False
# Data collator
# This one will take care of randomly masking the tokens.
lowercase__ = DataCollatorForWholeWordMask(tokenizer=_SCREAMING_SNAKE_CASE , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
lowercase__ = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
lowercase__ = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
lowercase__ = model_args.model_name_or_path
else:
lowercase__ = None
lowercase__ = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE )
trainer.save_model() # Saves the tokenizer too for easy upload
lowercase__ = os.path.join(training_args.output_dir , 'train_results.txt' )
if trainer.is_world_process_zero():
with open(_SCREAMING_SNAKE_CASE , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
lowercase__ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowercase__ = trainer.evaluate()
lowercase__ = math.exp(eval_output['eval_loss'] )
lowercase__ = perplexity
lowercase__ = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
with open(_SCREAMING_SNAKE_CASE , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
return results
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
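# --- hedged usage sketch --------------------------------------------------------
# A typical command line for this whole-word-masking MLM script; every flag below
# maps to a field declared in the dataclasses / TrainingArguments above, and the
# paths are placeholders:
#
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file path/to/train.txt \
#       --validation_file path/to/validation.txt \
#       --train_ref_file path/to/train_ref.txt \
#       --validation_ref_file path/to/validation_ref.txt \
#       --do_train --do_eval \
#       --output_dir path/to/output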
| 45
|
from string import ascii_uppercase

ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for base in range(2, 37):
        for num in range(1_000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
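# --- hedged example ------------------------------------------------------------
# Spot checks of the conversion above; letters come from ALPHABET_VALUES once a
# remainder exceeds 9.
assert decimal_to_any(255, 16) == "FF"
assert decimal_to_any(9, 2) == "1001"
assert decimal_to_any(35, 36) == "Z"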
| 45
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def __init__( self : Any , a : str , a : List[Any]=7 , a : int=3 , a : int=18 , a : Optional[Any]=30 , a : Optional[int]=400 , a : int=True , a : Tuple=None , a : Optional[Any]=True , a : str=False , a : str=True , a : int=True , a : Tuple=[0.5, 0.5, 0.5] , a : Any=[0.5, 0.5, 0.5] , )-> Optional[int]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size if size is not None else {'height': 18, 'width': 20}
lowercase__ = do_thumbnail
lowercase__ = do_align_axis
lowercase__ = do_pad
lowercase__ = do_normalize
lowercase__ = image_mean
lowercase__ = image_std
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = DonutImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : int )-> List[Any]:
"""simple docstring"""
lowercase__ = DonutImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Any )-> int:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_thumbnail' ) )
self.assertTrue(hasattr(a , 'do_align_long_axis' ) )
self.assertTrue(hasattr(a , 'do_pad' ) )
self.assertTrue(hasattr(a , 'do_normalize' ) )
self.assertTrue(hasattr(a , 'image_mean' ) )
self.assertTrue(hasattr(a , 'image_std' ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
"""simple docstring"""
pass
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 45
|
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , a : Any , a : Optional[int]=13 , a : Tuple=30 , a : Union[str, Any]=2 , a : List[str]=3 , a : Dict=True , a : List[str]=True , a : List[Any]=32 , a : List[str]=5 , a : Optional[int]=4 , a : List[str]=37 , a : Dict="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : int=10 , a : List[str]=0.02 , a : int=None , a : List[str]=2 , )-> Dict:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = scope
lowercase__ = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = num_patches + 1
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[Any]:
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : List[Any] , a : List[str] , a : Dict )-> Optional[Any]:
"""simple docstring"""
lowercase__ = ViTModel(config=a )
model.to(a )
model.eval()
lowercase__ = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : int , a : Optional[Any] , a : int , a : Tuple )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = ViTForMaskedImageModeling(config=a )
model.to(a )
model.eval()
lowercase__ = model(a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = ViTForMaskedImageModeling(a )
model.to(a )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : List[str] , a : int , a : List[Any] )-> str:
"""simple docstring"""
lowercase__ = self.type_sequence_label_size
lowercase__ = ViTForImageClassification(a )
model.to(a )
model.eval()
lowercase__ = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = ViTForImageClassification(a )
model.to(a )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) = config_and_inputs
lowercase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Any = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Union[str, Any] = (
{'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification}
if is_torch_available()
else {}
)
_UpperCamelCase : int = True
_UpperCamelCase : int = False
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : Dict = False
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
"""simple docstring"""
lowercase__ = ViTModelTester(self )
lowercase__ = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Optional[int]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(a )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[Any]:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = ViTModel.from_pretrained(a )
self.assertIsNotNone(a )
def __UpperCamelCase () -> str:
lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(a )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ = model(**a )
# verify the logits
lowercase__ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : int )-> List[str]:
"""simple docstring"""
lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' ).to(a )
lowercase__ = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=480 )
lowercase__ = prepare_img()
lowercase__ = image_processor(images=a , return_tensors='pt' )
lowercase__ = inputs.pixel_values.to(a )
# forward pass
with torch.no_grad():
lowercase__ = model(a , interpolate_pos_encoding=a )
# verify the logits
lowercase__ = torch.Size((1, 3_601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , a )
lowercase__ = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : str )-> str:
"""simple docstring"""
lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto' )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=a , return_tensors='pt' )
lowercase__ = inputs.pixel_values.to(a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase__ = model(a )
| 45
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_vivit""": ["""VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VivitConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["""VivitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
"""VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VivitModel""",
"""VivitPreTrainedModel""",
"""VivitForVideoClassification""",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
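
# Minimal usage sketch (a hedged example, assuming torch and the vision extras
# are installed). Because of the _LazyModule above, `modeling_vivit` is only
# imported the first time `VivitModel` is actually accessed:
#
#   from transformers import VivitConfig, VivitModel
#   model = VivitModel(VivitConfig())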
| 45
|
def stooge_sort(arr):
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(stooge_sort(unsorted))
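
# Minimal sanity check for the implementation above. Stooge sort runs in
# O(n^(log 3 / log 1.5)) ≈ O(n^2.71) time, so it is demonstration-only:
# >>> stooge_sort([18, 5, -3, 9, 0])
# [-3, 0, 5, 9, 18]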
| 45
| 1
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Any: # picklable for multiprocessing
return x.sum()
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str: # picklable for multiprocessing
return i + 1
@dataclass
class A:
    x: int
    y: str
class SCREAMING_SNAKE_CASE (TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : int )-> Optional[Any]:
"""simple docstring"""
lowercase__ = {}
lowercase__ = []
lowercase__ = 1
lowercase__ = [1, 2]
lowercase__ = {'a': 1, 'b': 2}
lowercase__ = {'a': [1, 2], 'b': [3, 4]}
lowercase__ = {'a': {'1': 1}, 'b': 2}
lowercase__ = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
lowercase__ = {}
lowercase__ = []
lowercase__ = 2
lowercase__ = [2, 3]
lowercase__ = {'a': 2, 'b': 3}
lowercase__ = {'a': [2, 3], 'b': [4, 5]}
lowercase__ = {'a': {'1': 2}, 'b': 3}
lowercase__ = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
self.assertEqual(map_nested(a , a ) , a )
self.assertEqual(map_nested(a , a ) , a )
self.assertEqual(map_nested(a , a ) , a )
self.assertEqual(map_nested(a , a ) , a )
self.assertEqual(map_nested(a , a ) , a )
self.assertEqual(map_nested(a , a ) , a )
self.assertEqual(map_nested(a , a ) , a )
self.assertEqual(map_nested(a , a ) , a )
lowercase__ = 2
self.assertEqual(map_nested(a , a , num_proc=a ) , a )
self.assertEqual(map_nested(a , a , num_proc=a ) , a )
self.assertEqual(map_nested(a , a , num_proc=a ) , a )
self.assertEqual(map_nested(a , a , num_proc=a ) , a )
self.assertEqual(map_nested(a , a , num_proc=a ) , a )
self.assertEqual(map_nested(a , a , num_proc=a ) , a )
self.assertEqual(map_nested(a , a , num_proc=a ) , a )
self.assertEqual(map_nested(a , a , num_proc=a ) , a )
lowercase__ = {'a': np.eye(2 ), 'b': np.zeros(3 ), 'c': np.ones(2 )}
lowercase__ = {'a': 2, 'b': 0, 'c': 2}
lowercase__ = {
'a': np.eye(2 ).astype(a ),
'b': np.zeros(3 ).astype(a ),
'c': np.ones(2 ).astype(a ),
}
self.assertEqual(map_nested(a , a , map_numpy=a ) , a )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(a , a , map_numpy=a ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(a , a , map_numpy=a , num_proc=a ) , a )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(a , a , map_numpy=a , num_proc=a ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(a ): # can't pickle a local lambda
            map_nested(lambda x : x + 1 , a , num_proc=a )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Any:
"""simple docstring"""
lowercase__ = {'a': 1, 'b': 2}
lowercase__ = {'a': 3, 'b': 4}
lowercase__ = {'a': 5, 'b': 6}
lowercase__ = sorted([('a', (1, 3, 5)), ('b', (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(a , a , a ) ) , a )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> int:
"""simple docstring"""
        class Foo:
            my_attr = 'bar'

        foo = Foo()
        self.assertEqual(foo.my_attr , 'bar' )
        with temporary_assignment(foo , 'my_attr' , 'BAR' ):
            self.assertEqual(foo.my_attr , 'BAR' )
        self.assertEqual(foo.my_attr , 'bar' )
@pytest.mark.parametrize(
'iterable_length, num_proc, expected_num_proc' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def __UpperCamelCase (iterable_length , num_proc , expected_num_proc ):
    with patch('datasets.utils.py_utils._single_map_nested' ) as mock_single_map_nested, patch(
        'datasets.parallel.parallel.Pool' ) as mock_multiprocessing_pool:
        data_struct = {f"""{i}""": i for i in range(iterable_length )}
        _ = map_nested(lambda x : x + 10 , data_struct , num_proc=num_proc , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class SCREAMING_SNAKE_CASE (TestCase ):
@require_tf
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> str:
"""simple docstring"""
import tensorflow as tf
from tensorflow.keras import layers
        model = layers.Dense(2 )

        def gen_random_output():
            x = tf.random.uniform((1, 3) )
            return model(x ).numpy()

        with temp_seed(42 , set_tensorflow=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_tensorflow=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any:
"""simple docstring"""
import torch
        def gen_random_output():
            model = torch.nn.Linear(3 , 2 )
            x = torch.rand(1 , 3 )
            return model(x ).detach().numpy()

        with temp_seed(42 , set_pytorch=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_pytorch=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Tuple:
"""simple docstring"""
def gen_random_output():
return np.random.rand(1 , 3 )
        with temp_seed(42 ):
            out1 = gen_random_output()
        with temp_seed(42 ):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@pytest.mark.parametrize('input_data' , [{}] )
def __UpperCamelCase (input_data ):
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
'data, expected_output' , [
({}, []),
([], []),
('foo', ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
([['foo', 'bar']], ['foo', 'bar']),
([[['foo'], ['bar']]], ['foo', 'bar']),
([[['foo'], 'bar']], ['foo', 'bar']),
({'a': 1, 'b': 2}, [1, 2]),
({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]),
({'a': {'1': 1}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': [2]}, [1, 2]),
] , )
def __UpperCamelCase (data , expected_output ):
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def __UpperCamelCase () -> int:
lowercase__ = A(x=1 , y='foobar' )
lowercase__ = {'x': 1, 'y': 'foobar'}
assert asdict(_SCREAMING_SNAKE_CASE ) == expected_output
lowercase__ = {'a': {'b': A(x=10 , y='foo' )}, 'c': [A(x=20 , y='bar' )]}
lowercase__ = {'a': {'b': {'x': 10, 'y': 'foo'}}, 'c': [{'x': 20, 'y': 'bar'}]}
assert asdict(_SCREAMING_SNAKE_CASE ) == expected_output
with pytest.raises(_SCREAMING_SNAKE_CASE ):
asdict([1, A(x=10 , y='foo' )] )
def _split_text(text ):
    return text.split()


def _aseconds_generator_of_aitems_with_timing(content ):
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)


def __UpperCamelCase ():
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 10 ) )
        assert out.count('hello' ) == 10
        assert out.count('there' ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 10 ) )
        assert out.count('hello' ) == 10
        assert out.count('there' ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'content': 'a'}, {'content': 'b'}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content )
        assert out.count('a' ) == 2
        assert out.count('b' ) == 2
        assert len(out ) == 4
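
# Hedged sketch of the behaviour exercised above: `map_nested` applies a
# function through arbitrarily nested dicts/lists, optionally in parallel.
# >>> from datasets.utils.py_utils import map_nested
# >>> map_nested(lambda x: x + 1, {"a": [1, 2], "b": {"c": 3}})
# {'a': [2, 3], 'b': {'c': 4}}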
| 45
|
from scipy.stats import spearmanr
import datasets
lowercase_ = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
lowercase_ = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
lowercase_ = R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE (datasets.Metric ):
    def _info(self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
    def _compute(self , predictions , references , return_pvalue=False ):
        """simple docstring"""
        results = spearmanr(references , predictions )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 45
| 1
|
class Graph:
    def __init__(self ):
        """simple docstring"""
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self , vertex ):
        """simple docstring"""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self , head , tail , weight ):
        """simple docstring"""
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight(self ):
        """simple docstring"""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )

        edges.sort(key=lambda e : e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self ):
        """simple docstring"""
        string = ''
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"""{head} -> {tail} == {weight}\n"""
        return string.rstrip('\n' )

    def get_edges(self ):
        """simple docstring"""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output

    def get_vertices(self ):
        """simple docstring"""
        return self.adjacency.keys()
    @staticmethod
    def build(vertices=None , edges=None ):
        """simple docstring"""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g
    class UnionFind:
        def __init__(self ):
            """simple docstring"""
            self.parent = {}
            self.rank = {}

        def __len__(self ):
            """simple docstring"""
            return len(self.parent )

        def make_set(self , item ):
            """simple docstring"""
            if item in self.parent:
                return self.find(item )
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self , item ):
            """simple docstring"""
            if item not in self.parent:
                return self.make_set(item )
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item] )
            return self.parent[item]

        def union(self , item1 , item2 ):
            """simple docstring"""
            root1 = self.find(item1 )
            root2 = self.find(item2 )
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph ):
        """simple docstring"""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head )
                set2 = union_find.find(tail )
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges )
        return mst
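
# Usage sketch for the Borůvka implementation above (toy edge list only):
# >>> g = Graph.build(vertices=[0, 1, 2, 3],
# ...                 edges=[(0, 1, 1), (0, 2, 2), (2, 3, 1), (1, 3, 4)])
# >>> mst = Graph.boruvka_mst(g)
# >>> print(mst)  # three edges connecting all four vertices at minimum total weight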
| 45
|
def solution(length: int = 50 ) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f'''{solution() = }''')
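
# Sanity check from the statement of Project Euler problem 114: a row of
# length 7 admits exactly 17 arrangements, so the recurrence above gives:
# >>> solution(7)
# 17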
| 45
| 1
|
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module ):
    def __init__(self , args ):
        """simple docstring"""
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True )
        modules = list(model.children() )[:-2]
        self.model = nn.Sequential(*modules )
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )

    def forward(self , x ):
        """simple docstring"""
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x ) )
        out = torch.flatten(out , start_dim=2 )
        out = out.transpose(1 , 2 ).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset ):
    def __init__(self , data_path , tokenizer , transforms , labels , max_seq_length ):
        """simple docstring"""
        self.data = [json.loads(l ) for l in open(data_path )]
        self.data_dir = os.path.dirname(data_path )
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels )
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self ):
        """simple docstring"""
        return len(self.data )

    def __getitem__(self , index ):
        """simple docstring"""
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'] , add_special_tokens=True ) )
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes )
        label[[self.labels.index(tgt ) for tgt in self.data[index]['label']]] = 1

        image = Image.open(os.path.join(self.data_dir , self.data[index]['img'] ) ).convert('RGB' )
        image = self.transforms(image )

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self ):
        """simple docstring"""
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row['label'] )
        return label_freqs
def collate_fn(batch ):
    lens = [len(row['sentence'] ) for row in batch]
    bsz, max_seq_len = len(batch ), max(lens )

    mask_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    text_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    for i_batch, (input_row, length) in enumerate(zip(batch , lens ) ):
        text_tensor[i_batch, :length] = input_row['sentence']
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row['image'] for row in batch] )
    tgt_tensor = torch.stack([row['label'] for row in batch] )
    img_start_token = torch.stack([row['image_start_token'] for row in batch] )
    img_end_token = torch.stack([row['image_end_token'] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ),
        ] )
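
# Hedged wiring sketch for the helpers above (jsonl path and checkpoint are
# hypothetical; `transformers` must be installed):
# >>> from transformers import AutoTokenizer
# >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# >>> dataset = JsonlDataset(
# ...     "train.jsonl", tokenizer, get_image_transforms(), get_mmimdb_labels(), max_seq_length=300
# ... )
# >>> sample = dataset[0]  # dict with sentence, image tensor and multi-hot label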
| 45
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
model_classes = {
"""b0""": efficientnet.EfficientNetBa,
"""b1""": efficientnet.EfficientNetBa,
"""b2""": efficientnet.EfficientNetBa,
"""b3""": efficientnet.EfficientNetBa,
"""b4""": efficientnet.EfficientNetBa,
"""b5""": efficientnet.EfficientNetBa,
"""b6""": efficientnet.EfficientNetBa,
"""b7""": efficientnet.EfficientNetBa,
}
CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1_408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1_536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1_792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2_304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2_560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def get_efficientnet_config(model_name ):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]['hidden_dim']
    config.width_coefficient = CONFIG_MAP[model_name]['width_coef']
    config.depth_coefficient = CONFIG_MAP[model_name]['depth_coef']
    config.image_size = CONFIG_MAP[model_name]['image_size']
    config.dropout_rate = CONFIG_MAP[model_name]['dropout_rate']
    config.depthwise_padding = CONFIG_MAP[model_name]['dw_padding']

    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def convert_image_processor(model_name ):
    size = CONFIG_MAP[model_name]['image_size']
    preprocessor = EfficientNetImageProcessor(
        size={'height': size, 'width': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=False , )
    return preprocessor
def rename_keys(original_param_names ):
    block_names = [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}

    rename_keys = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = 'efficientnet.' + item[1]

    key_mapping['predictions/kernel:0'] = 'classifier.weight'
    key_mapping['predictions/bias:0'] = 'classifier.bias'
    return key_mapping
def replace_params(hf_params , tf_params , key_mapping ):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name , pytorch_dump_folder_path , save_model , push_to_hub ):
    original_model = model_classes[model_name](
        include_top=True , weights='imagenet' , input_tensor=None , input_shape=None , pooling=None , classes=1000 , classifier_activation='softmax' , )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )

    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print('Converting parameters...' )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params , tf_params , key_mapping )

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img() , return_tensors='pt' )

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]['image_size']
    img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x , axis=0 )
    original_logits = original_model.predict(x )

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits , hf_logits , atol=1E-3 ), "The predicted logits are not the same."
    print('Model outputs match!' )

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        # Push model and image processor to hub
        print(F"""Pushing converted {model_name} to the hub...""" )
        model_name = F"""efficientnet-{model_name}"""
        preprocessor.push_to_hub(model_name )
        hf_model.push_to_hub(model_name )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
lowercase_ = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
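
# Example invocation of this conversion script (the filename is hypothetical;
# the flags are the ones defined by the argparse setup above):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model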
| 45
| 1
|
def power(base: int , exponent: int ) -> float:
    return base * power(base , (exponent - 1) ) if exponent else 1


if __name__ == "__main__":
    print("""Raise base to the power of exponent using recursion...""")
    base = int(input("""Enter the base: """).strip())
    exponent = int(input("""Enter the exponent: """).strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f'''{base} to the power of {exponent} is {result}''')
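
# Quick sanity checks for the recursion above:
# >>> power(2, 10)
# 1024
# >>> power(5, 0)
# 1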
| 45
|
import argparse
import json
import subprocess
def get_runner_status(target_runners , token ):
    offline_runners = []
    cmd = (
        F"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
        ' https://api.github.com/repos/huggingface/transformers/actions/runners'
    )
    output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
    o = output.stdout.decode('utf-8' )
    status = json.loads(o )
    runners = status['runners']
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )

    # save the result so we can report them on Slack
    with open('offline_runners.txt' , 'w' ) as fp:
        fp.write(json.dumps(offline_runners ) )

    if len(offline_runners ) > 0:
        failed = '\n'.join([x['name'] for x in offline_runners] )
        raise ValueError(F"""The following runners are offline:\n{failed}""" )
if __name__ == "__main__":
    def list_str(values ):
        return values.split(',' )
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
lowercase_ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
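
# Example invocation (script filename and token value are hypothetical):
#   python get_runner_status.py --target_runners runner-a,runner-b --token <GITHUB_PAT>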
| 45
| 1
|
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , a : Optional[Any] , a : Union[str, Any]=13 , a : int=7 , a : Dict=True , a : Any=True , a : int=True , a : List[Any]=True , a : Union[str, Any]=99 , a : str=32 , a : Any=5 , a : Dict=4 , a : str=4 , a : Optional[Any]="gelu" , a : Optional[int]=0.0 , a : Dict=0.1 , a : Any=True , a : Tuple=512 , a : Union[str, Any]=16 , a : str=2 , a : int=0.02 , a : Tuple=3 , a : Optional[Any]=4 , a : Union[str, Any]=None , )-> Dict:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_multiple_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout
lowercase__ = attention_dropout
lowercase__ = weight_tying
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[Any]:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ = self.get_config()
return config, input_ids, input_mask, token_labels
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Any:
"""simple docstring"""
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A__ , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE_ ( self : int )-> int:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.prepare_config_and_inputs()
lowercase__ = True
return config, input_ids, input_mask, token_labels
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : str , a : Optional[int] , a : List[str] )-> Dict:
"""simple docstring"""
lowercase__ = GPTNeoXJapaneseModel(config=A__ )
model.to(A__ )
model.eval()
lowercase__ = model(A__ , attention_mask=A__ )
lowercase__ = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Optional[int] , a : Union[str, Any] , a : str )-> str:
"""simple docstring"""
lowercase__ = True
lowercase__ = GPTNeoXJapaneseModel(A__ )
model.to(A__ )
model.eval()
lowercase__ = model(A__ , attention_mask=A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : Any , a : str , a : Union[str, Any] , a : Any , a : int )-> Any:
"""simple docstring"""
lowercase__ = GPTNeoXJapaneseForCausalLM(config=A__ )
model.to(A__ )
model.eval()
lowercase__ = model(A__ , attention_mask=A__ , labels=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : Optional[Any] , a : Tuple , a : int )-> Optional[Any]:
"""simple docstring"""
lowercase__ = True
lowercase__ = GPTNeoXJapaneseForCausalLM(config=A__ )
model.to(A__ )
model.eval()
# first forward pass
lowercase__ = model(A__ , attention_mask=A__ , use_cache=A__ )
lowercase__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowercase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase__ = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase__ = model(A__ , attention_mask=A__ , output_hidden_states=A__ )
lowercase__ = output_from_no_past['hidden_states'][0]
lowercase__ = model(
A__ , attention_mask=A__ , past_key_values=A__ , output_hidden_states=A__ , )['hidden_states'][0]
# select random slice
lowercase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A__ , A__ , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE_ ( self : int )-> int:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_UpperCamelCase : Dict = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
_UpperCamelCase : int = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
_UpperCamelCase : int = (
{'feature-extraction': GPTNeoXJapaneseModel, 'text-generation': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : List[str] = False
_UpperCamelCase : Tuple = False
def SCREAMING_SNAKE_CASE_ ( self : str )-> List[str]:
"""simple docstring"""
lowercase__ = GPTNeoXJapaneseModelTester(self )
lowercase__ = ConfigTester(self , config_class=A__ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Dict:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(A__ , A__ , A__ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> int:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(A__ , A__ , A__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
lowercase__ = None
self.model_tester.create_and_check_model_as_decoder(A__ , A__ , A__ )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Dict:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(A__ , A__ , A__ )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> List[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*A__ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any )-> List[Any]:
"""simple docstring"""
lowercase__ = 'abeja/gpt-neox-japanese-2.7b'
lowercase__ = ['データサイエンティストとは、', '100年後に必要とされる会社は、', 'フルリモートの環境で働くために必要なことは、', '国境の長いトンネルを抜けると', '美味しい日本食といえば、']
lowercase__ = [
'データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。',
'100年後に必要とされる会社は、「人」が中心の会社です。',
'フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。',
'国境の長いトンネルを抜けると、そこは雪国だった。',
'美味しい日本食といえば、やっぱりお寿司ですよね。',
]
lowercase__ = GPTNeoXJapaneseTokenizer.from_pretrained(A__ )
lowercase__ = GPTNeoXJapaneseForCausalLM.from_pretrained(A__ )
lowercase__ = []
for prompt in prompts:
lowercase__ = tokenizer(A__ , return_tensors='pt' ).input_ids
lowercase__ = model.generate(A__ , max_length=50 )
lowercase__ = tokenizer.batch_decode(A__ , skip_special_tokens=A__ )
predicted_outputs += generated_string
self.assertListEqual(A__ , A__ )
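
# Hedged note on the slow test above: `generate` is called without sampling,
# so decoding is greedy and deterministic; the five Japanese completions are
# therefore expected to reproduce exactly on the reference checkpoint.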
| 700
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin ):
    feature_extractor_class = 'ClapFeatureExtractor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')

    def __init__(self , feature_extractor , tokenizer ):
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer )

    def __call__(self , text=None , audios=None , return_tensors=None , **kwargs ):
        """simple docstring"""
        sampling_rate = kwargs.pop('sampling_rate' , None )

        if text is None and audios is None:
            raise ValueError('You have to specify either text or audios. Both cannot be none.' )

        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )

        if audios is not None:
            audio_features = self.feature_extractor(
                audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )

        if text is not None and audios is not None:
            encoding['input_features'] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )

    def batch_decode(self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names(self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
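
# Usage sketch for the processor above (the checkpoint name is assumed to
# exist on the Hub; `audio_array` is a placeholder 1-D waveform):
# >>> from transformers import ClapProcessor
# >>> processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
# >>> inputs = processor(
# ...     text=["a dog barking"], audios=audio_array, sampling_rate=48_000, return_tensors="pt"
# ... )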
| 45
| 0
|
def greatest_common_divisor(a , b ):
    return abs(b ) if a == 0 else greatest_common_divisor(b % a , a )


def gcd_by_iterative(x , y ):
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x )


def main():
    try:
        nums = input('Enter two integers separated by comma (,): ' ).split(',' )
        num_1 = int(nums[0] )
        num_2 = int(nums[1] )
        print(
            F"""greatest_common_divisor({num_1}, {num_2}) = """
            F"""{greatest_common_divisor(num_1 , num_2 )}""" )
        print(F"""By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1 , num_2 )}""" )
    except (IndexError, UnboundLocalError, ValueError):
        print('Wrong input' )


if __name__ == "__main__":
    main()
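
# Quick sanity checks for both implementations above:
# >>> greatest_common_divisor(24, 40)
# 8
# >>> gcd_by_iterative(24, 40)
# 8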
| 701
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
lowercase_ = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 1_024,
"""moussaKam/barthez""": 1_024,
"""moussaKam/barthez-orangesum-title""": 1_024,
}
lowercase_ = """▁"""
class BarthezTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BarthezTokenizer

    def __init__(self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs , ):
        """simple docstring"""
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None )-> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None )-> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None )-> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
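
# Usage sketch (a hedged example; the checkpoint comes from the pretrained map
# above and `transformers` with sentencepiece/tokenizers must be installed):
# >>> tok = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
# >>> tok.build_inputs_with_special_tokens([10, 11])[0] == tok.cls_token_id
# True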
| 45
| 0
|
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , a : Dict , a : Any=13 , a : Dict=32 , a : List[Any]=2 , a : Optional[int]=3 , a : Optional[int]=16 , a : Dict=[1, 2, 1] , a : List[Any]=[2, 2, 4] , a : str=2 , a : Dict=2.0 , a : Dict=True , a : Tuple=0.0 , a : int=0.0 , a : List[Any]=0.1 , a : Any="gelu" , a : int=False , a : List[Any]=True , a : List[str]=0.02 , a : Union[str, Any]=1E-5 , a : Union[str, Any]=True , a : List[Any]=None , a : Tuple=True , a : Optional[int]=10 , a : Any=8 , a : Optional[int]=["stage1", "stage2", "stage3"] , a : str=[1, 2, 3] , )-> Dict:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = depths
lowercase__ = num_heads
lowercase__ = window_size
lowercase__ = mlp_ratio
lowercase__ = qkv_bias
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = drop_path_rate
lowercase__ = hidden_act
lowercase__ = use_absolute_embeddings
lowercase__ = patch_norm
lowercase__ = layer_norm_eps
lowercase__ = initializer_range
lowercase__ = is_training
lowercase__ = scope
lowercase__ = use_labels
lowercase__ = type_sequence_label_size
lowercase__ = encoder_stride
lowercase__ = out_features
lowercase__ = out_indices
def SCREAMING_SNAKE_CASE_ ( self : str )-> List[str]:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Dict:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE_ ( self : str , a : Optional[Any] , a : List[Any] , a : List[Any] )-> Dict:
"""simple docstring"""
lowercase__ = MaskFormerSwinModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowercase__ = model(UpperCAmelCase_ )
lowercase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str , a : str , a : Tuple )-> Tuple:
"""simple docstring"""
lowercase__ = MaskFormerSwinBackbone(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowercase__ = model(UpperCAmelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
        with self.parent.assertRaises(ValueError ):
            config.out_features = ['stem']
            model = MaskFormerSwinBackbone(config=config )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_save_load_fast_init_to_base(self):
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_initialization(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    # Overriding as returned hidden states are tuples of tensors instead of a single tensor
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
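
# Standalone sketch (not part of the test suite): driving the backbone tested
# above directly with a tiny config. The config values below are assumptions
# that mirror the tester defaults, not required settings.
if __name__ == "__main__":
    sketch_config = MaskFormerSwinConfig(
        image_size=32,
        patch_size=2,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        out_features=["stage1", "stage2", "stage3"],
    )
    sketch_backbone = MaskFormerSwinBackbone(sketch_config).eval()
    with torch.no_grad():
        sketch_features = sketch_backbone(torch.randn(1, 3, 32, 32)).feature_maps
    for fmap in sketch_features:
        # one (batch, channels, height, width) tensor per requested stage
        print(tuple(fmap.shape))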
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
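
# Standalone sketch (assumes a CUDA GPU and network access): self-attention
# guidance is controlled entirely by `sag_scale`; `sag_scale=0.0` reduces the
# call to plain classifier-free guidance. The prompt and file name are ad hoc.
if __name__ == "__main__":
    pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
    result = pipe(
        "a photo of an astronaut riding a horse",
        guidance_scale=7.5,
        sag_scale=0.75,  # typical values are 0.5 - 1.0
        num_inference_steps=25,
    )
    result.images[0].save("sag_sample.png")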
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"artists_file": "artists.json",
"lyrics_file": "lyrics.json",
"genres_file": "genres.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"artists_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
},
"genres_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
},
"lyrics_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
"jukebox": 512,
}
class JukeboxTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres

        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)
        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)
        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2 the character vocabulary had n_vocab=80; v3 dropped '+', leaving n_vocab=79 characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")

        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}

    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def _tokenize(self, lyrics):
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics

    def prepare_for_tokenization(self, artists, genres, lyrics, is_split_into_words=False):
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics
    def _run_strip_accents(self, text):
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text):
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text

    def convert_lyric_tokens_to_string(self, lyrics):
        return " ".join(lyrics)

    def convert_to_tensors(self, inputs, tensor_type=None, prepend_batch_axis=False):
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy

        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length."
            )
        return inputs
    def __call__(self, artist, genres, lyrics="", return_tensors="pt") -> BatchEncoding:
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)

        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)

        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors
            )
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return

        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]
        )
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))

        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]
        )
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))

        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]
        )
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))

        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
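
# Minimal usage sketch: build conditioning tokens for a Jukebox prior. The
# checkpoint name is an assumption (any repo that ships the three vocab files
# above works); the lyrics text is ad hoc.
if __name__ == "__main__":
    tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
    encoding = tokenizer(artist="Zac Brown Band", genres="Country", lyrics="I met a traveller from an antique land")
    print(encoding["input_ids"][0].shape)  # one (1, seq_len) tensor per prior level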
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_file = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_file) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed scheduled (daily) CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Return the decoded file contents of the requested artifacts, keyed by artifact and file name."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"""{artifact_name}.zip""")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
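
# Minimal usage sketch (run manually): the artifact name below is only an
# example - pass whichever artifact names your workflow actually uploads.
# A GitHub token is read from the environment if present.
if __name__ == "__main__":
    reports = get_last_daily_ci_reports(
        artifact_names=["ci_results"],  # hypothetical artifact name
        output_dir=".",
        token=os.environ.get("GITHUB_TOKEN"),
    )
    for artifact_name, files in reports.items():
        print(artifact_name, sorted(files))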
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    """
    Find the area of the grid whose count of contained rectangles is closest to
    ``target``. An a x b grid contains T(a) * T(b) rectangles, where
    T(n) = n * (n + 1) / 2 is the n-th triangle number.
    """
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(f'''{solution() = }''')
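    # Worked check: a 3 x 2 grid contains T(3) * T(2) = 6 * 3 = 18 rectangles,
    # the example given in Project Euler problem 85 (T(n) = n * (n + 1) / 2).
    assert (3 * 4 // 2) * (2 * 3 // 2) == 18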
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowercase_ = False
class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
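    # Quick demonstration of the two helpers above (values chosen ad hoc):
    print(is_arithmetic_series([2, 4, 6]))  # True: constant difference of 2
    print(is_arithmetic_series([2, 4, 7]))  # False: differences are 2 and 3
    print(arithmetic_mean([2, 4, 6]))       # 4.0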
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """Log commit info."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """Handle single- and multi-GPU / multi-node setups."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"""--- Global rank: {params.global_rank} - """
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def set_seed(args):
    """Seed numpy and torch (all GPUs) for reproducibility."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x: float) -> float:
        return math.sin(10 * x)
print("""f(x) = sin(10 * x)""")
print("""The length of the curve from x = -10 to x = 10 is:""")
    i = 10
while i <= 100_000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
for key in orig_state_dict.copy().keys():
lowercase__ = orig_state_dict.pop(__A )
if "attn.in_proj" in key:
lowercase__ = key.split('.' )
if key.startswith('visual' ):
lowercase__ = key_split[3]
lowercase__ = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
lowercase__ = val[
:dim, :
]
lowercase__ = val[
dim : dim * 2, :
]
lowercase__ = val[
-dim:, :
]
else:
lowercase__ = val[
:dim
]
lowercase__ = val[
dim : dim * 2
]
lowercase__ = val[
-dim:
]
else:
if "weight" in key:
lowercase__ = val[
:dim, :
]
lowercase__ = val[
dim : dim * 2, :
]
lowercase__ = val[
-dim:, :
]
else:
lowercase__ = val[:dim]
lowercase__ = val[
dim : dim * 2
]
lowercase__ = val[-dim:]
elif key.startswith('mit' ):
lowercase__ = key_split[2]
lowercase__ = config.vision_config.mit_hidden_size
if "weight" in key:
lowercase__ = val[:dim, :]
lowercase__ = val[dim : dim * 2, :]
lowercase__ = val[-dim:, :]
else:
lowercase__ = val[:dim]
lowercase__ = val[dim : dim * 2]
lowercase__ = val[-dim:]
else:
lowercase__ = key_split[2]
lowercase__ = config.text_config.hidden_size
if "weight" in key:
lowercase__ = val[:dim, :]
                    lowercase__ = val[dim : dim * 2, :]
lowercase__ = val[-dim:, :]
else:
lowercase__ = val[:dim]
                    lowercase__ = val[dim : dim * 2]
lowercase__ = val[-dim:]
else:
lowercase__ = rename_key(__A )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
lowercase__ = val.T
lowercase__ = val
return orig_state_dict
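# The slicing above splits fused attention projections: an in_proj weight of
# shape (3*dim, dim) is cut row-wise into equal query/key/value blocks. A toy
# illustration of the same slicing (hypothetical tensor, dim = 2):
import torch

w = torch.arange(12.0).reshape(6, 2)  # stands in for a (3*dim, dim) in_proj weight
dim = 2
q, k, v = w[:dim, :], w[dim : dim * 2, :], w[-dim:, :]
assert torch.equal(torch.cat([q, k, v]), w)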
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[Any]:
if num_frames == 8:
lowercase__ = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 16:
lowercase__ = '''eating_spaghetti.npy'''
elif num_frames == 32:
lowercase__ = '''eating_spaghetti_32_frames.npy'''
lowercase__ = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=__A , repo_type='dataset' , )
lowercase__ = np.load(__A )
return list(__A )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False ) -> int:
lowercase__ = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
lowercase__ = model_to_url[model_name]
lowercase__ = 8
if "16-frames" in model_name:
lowercase__ = 16
elif "shot" in model_name:
lowercase__ = 32
lowercase__ = get_xclip_config(__A , __A )
lowercase__ = XCLIPModel(__A )
model.eval()
if "drive" in checkpoint_url:
lowercase__ = '''pytorch_model.bin'''
gdown.cached_download(__A , __A , quiet=__A )
lowercase__ = torch.load(__A , map_location='cpu' )['''model''']
else:
lowercase__ = torch.hub.load_state_dict_from_url(__A )['''model''']
lowercase__ = convert_state_dict(__A , __A )
lowercase__ = XCLIPModel(__A )
    lowercase__ , lowercase__ = model.load_state_dict(__A , strict=__A )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
lowercase__ = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224
lowercase__ = VideoMAEImageProcessor(size=__A )
lowercase__ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
lowercase__ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
lowercase__ = XCLIPProcessor(image_processor=__A , tokenizer=__A )
lowercase__ = prepare_video(__A )
lowercase__ = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=__A , return_tensors='pt' , padding=__A )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
lowercase__ = model(**__A )
# Verify outputs
lowercase__ = outputs.logits_per_video
lowercase__ = logits_per_video.softmax(dim=1 )
print('Probs:' , __A )
# kinetics-400
if model_name == "xclip-base-patch32":
lowercase__ = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] )
elif model_name == "xclip-base-patch32-16-frames":
lowercase__ = torch.tensor([[7.0_999E-04, 9.9_883E-01, 4.5_580E-04]] )
elif model_name == "xclip-base-patch16":
lowercase__ = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] )
elif model_name == "xclip-base-patch16-16-frames":
lowercase__ = torch.tensor([[7.6_937E-04, 9.9_728E-01, 1.9_473E-03]] )
elif model_name == "xclip-large-patch14":
lowercase__ = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] )
elif model_name == "xclip-large-patch14-16-frames":
lowercase__ = torch.tensor([[3.3_877E-04, 9.9_937E-01, 2.8_888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
lowercase__ = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
lowercase__ = torch.tensor([[3.8_554E-04, 9.9_929E-01, 3.2_754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
lowercase__ = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
lowercase__ = torch.tensor([[7.1_890E-06, 9.9_994E-01, 5.6_559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
lowercase__ = torch.tensor([[1.0_320E-05, 9.9_993E-01, 6.2_435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
lowercase__ = torch.tensor([[4.1_377E-06, 9.9_990E-01, 9.8_386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
lowercase__ = torch.tensor([[4.1_347E-05, 9.9_962E-01, 3.3_411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
lowercase__ = torch.tensor([[8.5_857E-05, 9.9_928E-01, 6.3_291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
lowercase__ = torch.tensor([[8.5_857E-05, 9.9_928E-01, 6.3_291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
lowercase__ = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
lowercase__ = torch.tensor([[9.8_219E-04, 9.9_593E-01, 3.0_863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
lowercase__ = torch.tensor([[3.5_082E-04, 9.9_785E-01, 1.7_966E-03]] )
else:
raise ValueError(F"""Model name {model_name} not supported""" )
assert torch.allclose(__A , __A , atol=1E-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(__A , organization='nielsr' )
processor.push_to_hub(__A , organization='nielsr' )
slow_tokenizer.push_to_hub(__A , organization='nielsr' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowercase_ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 708
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 45
| 0
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
lowercase_ = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
lowercase__ = {}
with open(__lowerCAmelCase , 'r' ) as file:
for line_number, line in enumerate(__lowerCAmelCase ):
lowercase__ = line.strip()
if line:
lowercase__ = line.split()
lowercase__ = line_number
lowercase__ = words[0]
lowercase__ = value
return result
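# The loop above builds an id-to-label mapping: each non-empty line's index
# becomes the key and the line's first token the value, so a two-line file
# containing 'down' and 'up' (hypothetical) yields {0: 'down', 1: 'up'}.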
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
for attribute in key.split('.' ):
lowercase__ = getattr(__lowerCAmelCase , __lowerCAmelCase )
lowercase__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__lowerCAmelCase ):
lowercase__ = PARAM_MAPPING[full_name.split('.' )[-1]]
lowercase__ = 'param'
if weight_type is not None and weight_type != "param":
lowercase__ = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
elif weight_type is not None and weight_type == "param":
lowercase__ = hf_pointer
for attribute in hf_param_name.split('.' ):
lowercase__ = getattr(__lowerCAmelCase , __lowerCAmelCase )
lowercase__ = shape_pointer.shape
# let's reduce dimension
lowercase__ = value[0]
else:
lowercase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowercase__ = value
elif weight_type == "weight_g":
lowercase__ = value
elif weight_type == "weight_v":
lowercase__ = value
elif weight_type == "bias":
lowercase__ = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
lowercase__ = getattr(__lowerCAmelCase , __lowerCAmelCase )
lowercase__ = value
else:
lowercase__ = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
lowercase__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__lowerCAmelCase ):
lowercase__ = PARAM_MAPPING[full_name.split('.' )[-1]]
lowercase__ = 'param'
if weight_type is not None and weight_type != "param":
lowercase__ = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
lowercase__ = '.'.join([key, hf_param_name] )
else:
lowercase__ = key
lowercase__ = value if 'lm_head' in full_key else value[0]
lowercase_ = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Any:
lowercase__ = False
for key, mapped_key in MAPPING.items():
lowercase__ = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
lowercase__ = True
if "*" in mapped_key:
lowercase__ = name.split(__lowerCAmelCase )[0].split('.' )[-2]
lowercase__ = mapped_key.replace('*' , __lowerCAmelCase )
if "weight_g" in name:
lowercase__ = 'weight_g'
elif "weight_v" in name:
lowercase__ = 'weight_v'
elif "bias" in name:
lowercase__ = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ = 'weight'
else:
lowercase__ = None
if hf_dict is not None:
rename_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return is_used
return is_used
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
lowercase__ = []
lowercase__ = fairseq_model.state_dict()
lowercase__ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
lowercase__ = True
else:
lowercase__ = load_wavaveca_layer(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
lowercase__ = full_name.split('conv_layers.' )[-1]
lowercase__ = name.split('.' )
lowercase__ = int(items[0] )
lowercase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowercase__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowercase__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
lowercase__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
lowercase__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__lowerCAmelCase )
@torch.no_grad()
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
if config_path is not None:
lowercase__ = WavaVecaConfig.from_pretrained(__lowerCAmelCase )
else:
lowercase__ = WavaVecaConfig()
if is_seq_class:
lowercase__ = read_txt_into_dict(__lowerCAmelCase )
lowercase__ = idalabel
lowercase__ = WavaVecaForSequenceClassification(__lowerCAmelCase )
lowercase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
feature_extractor.save_pretrained(__lowerCAmelCase )
elif is_finetuned:
if dict_path:
lowercase__ = Dictionary.load(__lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowercase__ = target_dict.pad_index
lowercase__ = target_dict.bos_index
lowercase__ = target_dict.eos_index
lowercase__ = len(target_dict.symbols )
lowercase__ = os.path.join(__lowerCAmelCase , 'vocab.json' )
if not os.path.isdir(__lowerCAmelCase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(__lowerCAmelCase ) )
return
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
lowercase__ = target_dict.indices
# fairseq has the <pad> and <s> switched
lowercase__ = 0
lowercase__ = 1
with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
lowercase__ = WavaVecaCTCTokenizer(
__lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=__lowerCAmelCase , )
lowercase__ = True if config.feat_extract_norm == 'layer' else False
lowercase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
lowercase__ = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
lowercase__ = WavaVecaForCTC(__lowerCAmelCase )
else:
lowercase__ = WavaVecaForPreTraining(__lowerCAmelCase )
if is_finetuned or is_seq_class:
lowercase__ , lowercase__ , lowercase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
lowercase__ = argparse.Namespace(task='audio_pretraining' )
lowercase__ = fairseq.tasks.setup_task(__lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__lowerCAmelCase )
lowercase__ = model[0].eval()
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , not is_finetuned )
hf_wavavec.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
lowercase_ = parser.parse_args()
lowercase_ = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 709
|
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 50 ) -> int:
lowercase__ = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
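# Under the classic reading of this problem (tiles of length 2, 3 or 4, one
# colour per row, at least one coloured tile), the count also follows a simple
# per-colour recurrence; a cross-check sketch with hypothetical names:
def colour_ways(length: int, tile: int) -> int:
    # f[k] counts fillings of a length-k row with unit cells and `tile`-long tiles
    f = [0] * (length + 1)
    for k in range(length + 1):
        f[k] = 1 if k < tile else f[k - 1] + f[k - tile]
    return f[length] - 1  # drop the all-grey filling

# Worked example from the problem statement: a length-5 row admits 7 + 3 + 2 = 12 ways.
assert sum(colour_ways(5, t) for t in (2, 3, 4)) == 12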
| 45
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = "dpr"
def __init__( self : Any , a : str=30_522 , a : Optional[int]=768 , a : Optional[Any]=12 , a : List[Any]=12 , a : List[str]=3_072 , a : Optional[Any]="gelu" , a : Optional[Any]=0.1 , a : List[str]=0.1 , a : Any=512 , a : Tuple=2 , a : Any=0.02 , a : str=1E-1_2 , a : Optional[Any]=0 , a : List[str]="absolute" , a : Any = 0 , **a : List[Any] , )-> str:
"""simple docstring"""
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = projection_dim
lowercase__ = position_embedding_type
| 710
|
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
def __init__( self : Optional[Any] , a : UNetaDModel , a : UNetaDModel , a : DDPMScheduler , a : Any , )-> Dict:
"""simple docstring"""
super().__init__()
lowercase__ = value_function
lowercase__ = unet
lowercase__ = scheduler
lowercase__ = env
lowercase__ = env.get_dataset()
lowercase__ = {}
for key in self.data.keys():
try:
lowercase__ = self.data[key].mean()
except: # noqa: E722
pass
lowercase__ = {}
for key in self.data.keys():
try:
lowercase__ = self.data[key].std()
except: # noqa: E722
pass
lowercase__ = env.observation_space.shape[0]
lowercase__ = env.action_space.shape[0]
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Any , a : int )-> Dict:
"""simple docstring"""
return (x_in - self.means[key]) / self.stds[key]
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str , a : List[str] )-> str:
"""simple docstring"""
return x_in * self.stds[key] + self.means[key]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : Tuple )-> Tuple:
"""simple docstring"""
if type(a ) is dict:
return {k: self.to_torch(a ) for k, v in x_in.items()}
elif torch.is_tensor(a ):
return x_in.to(self.unet.device )
return torch.tensor(a , device=self.unet.device )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Optional[int] , a : Dict , a : Optional[Any] )-> List[Any]:
"""simple docstring"""
for key, val in cond.items():
lowercase__ = val.clone()
return x_in
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[Any] , a : Any , a : Optional[Any] , a : Optional[int] )-> List[Any]:
"""simple docstring"""
lowercase__ = x.shape[0]
lowercase__ = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowercase__ = torch.full((batch_size,) , a , device=self.unet.device , dtype=torch.long )
for _ in range(a ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowercase__ = self.value_function(x.permute(0 , 2 , 1 ) , a ).sample
lowercase__ = torch.autograd.grad([y.sum()] , [x] )[0]
lowercase__ = self.scheduler._get_variance(a )
lowercase__ = torch.exp(0.5 * posterior_variance )
lowercase__ = model_std * grad
lowercase__ = 0
lowercase__ = x.detach()
lowercase__ = x + scale * grad
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.unet(x.permute(0 , 2 , 1 ) , a ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
lowercase__ = self.scheduler.step(a , a , a , predict_epsilon=a )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.to_torch(a )
return x, y
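    # In short, this is value-guided (classifier-guided) sampling: at every
    # denoising step the trajectory is nudged along the gradient of the learned
    # value function (x <- x + scale * grad), the initial-state condition is
    # re-applied, and only then is the denoiser's prediction stepped through
    # the scheduler.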
def __call__( self : Any , a : Tuple , a : int=64 , a : Tuple=32 , a : List[Any]=2 , a : List[str]=0.1 )-> List[Any]:
"""simple docstring"""
lowercase__ = self.normalize(a , 'observations' )
lowercase__ = obs[None].repeat(a , axis=0 )
lowercase__ = {0: self.to_torch(a )}
lowercase__ = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowercase__ = randn_tensor(a , device=self.unet.device )
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.to_torch(a )
# run the diffusion process
lowercase__ , lowercase__ = self.run_diffusion(a , a , a , a )
# sort output trajectories by value
lowercase__ = y.argsort(0 , descending=a ).squeeze()
lowercase__ = x[sorted_idx]
lowercase__ = sorted_values[:, :, : self.action_dim]
lowercase__ = actions.detach().cpu().numpy()
lowercase__ = self.de_normalize(a , key='actions' )
# select the action with the highest value
if y is not None:
lowercase__ = 0
else:
# if we didn't run value guiding, select a random action
lowercase__ = np.random.randint(0 , a )
lowercase__ = denorm_actions[selected_index, 0]
return denorm_actions
| 45
| 0
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def __UpperCamelCase () -> List[Any]:
lowercase__ = 9, 14 # noqa: F841
lowercase__ = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
lowercase__ = defaultdict(_lowerCAmelCase )
for nodea, nodea, cost in edges:
        adjacency[nodea].append([nodea, cost] )
        adjacency[nodea].append([nodea, cost] )
lowercase__ = mst(_lowerCAmelCase )
lowercase__ = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
lowercase__ = tuple(answer[:2] )
lowercase__ = tuple(edge[::-1] )
assert edge in result or reverse in result
| 711
|
from PIL import Image
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Image:
def brightness(_SCREAMING_SNAKE_CASE ) -> float:
return 128 + level + (c - 128)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
    return img.point(brightness )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
lowercase_ = change_brightness(img, 100)
brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
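# Note that 128 + level + (c - 128) reduces to a uniform shift by level; a
# self-contained sketch of the same transform (hypothetical name
# `adjust_brightness`, intended for 8-bit modes where point() builds a
# lookup table):
from PIL import Image

def adjust_brightness(img: Image.Image, level: float) -> Image.Image:
    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)')
    return img.point(lambda c: 128 + level + (c - 128))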
| 45
| 0
|
import math
import sys
import cva
import numpy as np
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> np.ndarray:
    # Apply the Gaussian function to each element of the matrix.
lowercase__ = math.sqrt(__lowerCAmelCase )
lowercase__ = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> np.ndarray:
lowercase__ = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> np.ndarray:
    # Create a Gaussian kernel of the given dimension.
lowercase__ = np.zeros((kernel_size, kernel_size) )
for i in range(0 , __lowerCAmelCase ):
for j in range(0 , __lowerCAmelCase ):
lowercase__ = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(__lowerCAmelCase , __lowerCAmelCase )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> np.ndarray:
lowercase__ = np.zeros(img.shape )
lowercase__ = get_gauss_kernel(__lowerCAmelCase , __lowerCAmelCase )
lowercase__ = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
lowercase__ = get_slice(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowercase__ = img_s - img_s[kernel_size // 2, kernel_size // 2]
lowercase__ = vec_gaussian(__lowerCAmelCase , __lowerCAmelCase )
lowercase__ = np.multiply(__lowerCAmelCase , __lowerCAmelCase )
lowercase__ = np.multiply(__lowerCAmelCase , __lowerCAmelCase )
lowercase__ = np.sum(__lowerCAmelCase ) / np.sum(__lowerCAmelCase )
lowercase__ = val
return imga
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> tuple:
lowercase__ = args[1] if args[1:] else """../image_data/lena.jpg"""
lowercase__ = float(args[2] ) if args[2:] else 1.0
lowercase__ = float(args[3] ) if args[3:] else 1.0
if args[4:]:
lowercase__ = int(args[4] )
lowercase__ = kernel_size + abs(kernel_size % 2 - 1 )
else:
lowercase__ = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
lowercase_ = parse_args(sys.argv)
lowercase_ = cva.imread(filename, 0)
cva.imshow("""input image""", img)
lowercase_ = img / 255
lowercase_ = out.astype("""float32""")
lowercase_ = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
lowercase_ = out * 255
lowercase_ = np.uinta(out)
cva.imshow("""output image""", out)
cva.waitKey(0)
cva.destroyAllWindows()
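# Typical invocation (hypothetical paths/values), matching parse_args above:
#   python bilateral_filter.py ../image_data/lena.jpg 1.0 1.0 5
# argv order: filename, spatial_variance, intensity_variance, kernel_size; an
# even kernel_size is bumped to the next odd value so the window has a centre.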
| 712
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def __init__( self : Any , a : str , a : List[Any]=7 , a : int=3 , a : int=18 , a : Optional[Any]=30 , a : Optional[int]=400 , a : int=True , a : Tuple=None , a : Optional[Any]=True , a : str=False , a : str=True , a : int=True , a : Tuple=[0.5, 0.5, 0.5] , a : Any=[0.5, 0.5, 0.5] , )-> Optional[int]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size if size is not None else {'height': 18, 'width': 20}
lowercase__ = do_thumbnail
lowercase__ = do_align_axis
lowercase__ = do_pad
lowercase__ = do_normalize
lowercase__ = image_mean
lowercase__ = image_std
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = DonutImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : int )-> List[Any]:
"""simple docstring"""
lowercase__ = DonutImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Any )-> int:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_thumbnail' ) )
self.assertTrue(hasattr(a , 'do_align_long_axis' ) )
self.assertTrue(hasattr(a , 'do_pad' ) )
self.assertTrue(hasattr(a , 'do_normalize' ) )
self.assertTrue(hasattr(a , 'image_mean' ) )
self.assertTrue(hasattr(a , 'image_std' ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
"""simple docstring"""
pass
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 45
| 0
|
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
    if isinstance(num , float ):
        raise TypeError('\'float\' object cannot be interpreted as an integer' )
    if isinstance(num , str ):
        raise TypeError('\'str\' object cannot be interpreted as an integer' )
if num == 0:
return "0b0"
lowercase__ = False
if num < 0:
lowercase__ = True
lowercase__ = -num
lowercase__ = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(__UpperCAmelCase ) for e in binary )
return "0b" + "".join(str(__UpperCAmelCase ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
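# The builtin produces exactly the format targeted above, which makes a handy
# cross-check:
assert bin(0) == '0b0' and bin(10) == '0b1010' and bin(-10) == '-0b1010'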
| 713
|
import math
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
if 0 not in (x, y):
        # We use the relation log10(x^y) = y*log10(x), where 10 is the base.
return y * math.logaa(_SCREAMING_SNAKE_CASE )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError('This should never happen' )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
lowercase_ = """Enter the base and the power separated by a comma: """
lowercase_ , lowercase_ = map(int, input(prompt).split(""","""))
lowercase_ , lowercase_ = map(int, input(prompt).split(""","""))
# We find the log of each number, using the function res(), which takes two
# arguments.
lowercase_ = res(xa, ya)
lowercase_ = res(xa, ya)
# We check for the largest number
if resa > resa:
print("""Largest number is""", xa, """^""", ya)
elif resa > resa:
print("""Largest number is""", xa, """^""", ya)
else:
print("""Both are equal""")
| 45
| 0
|
import argparse
import struct
import unittest
class SCREAMING_SNAKE_CASE :
def __init__( self : Tuple , a : bytes )-> None:
"""simple docstring"""
lowercase__ = data
# Initialize hash values
lowercase__ = [
0x6A_09E_667,
0xBB_67A_E85,
0x3C_6EF_372,
0xA5_4FF_53A,
0x51_0E5_27F,
0x9B_056_88C,
0x1F_83D_9AB,
0x5B_E0C_D19,
]
# Initialize round constants
lowercase__ = [
0x42_8A2_F98,
0x71_374_491,
0xB5_C0F_BCF,
0xE9_B5D_BA5,
0x39_56C_25B,
0x59_F11_1F1,
0x92_3F8_2A4,
0xAB_1C5_ED5,
0xD8_07A_A98,
0x12_835_B01,
0x24_318_5BE,
0x55_0C7_DC3,
0x72_BE5_D74,
0x80_DEB_1FE,
0x9B_DC0_6A7,
0xC1_9BF_174,
0xE4_9B6_9C1,
0xEF_BE4_786,
0x0F_C19_DC6,
0x24_0CA_1CC,
0x2D_E92_C6F,
0x4A_748_4AA,
0x5C_B0A_9DC,
0x76_F98_8DA,
0x98_3E5_152,
0xA8_31C_66D,
0xB0_032_7C8,
0xBF_597_FC7,
0xC6_E00_BF3,
0xD5_A79_147,
0x06_CA6_351,
0x14_292_967,
0x27_B70_A85,
0x2E_1B2_138,
0x4D_2C6_DFC,
0x53_380_D13,
0x65_0A7_354,
0x76_6A0_ABB,
0x81_C2C_92E,
0x92_722_C85,
0xA2_BFE_8A1,
0xA8_1A6_64B,
0xC2_4B8_B70,
0xC7_6C5_1A3,
0xD1_92E_819,
0xD6_990_624,
0xF4_0E3_585,
0x10_6AA_070,
0x19_A4C_116,
0x1E_376_C08,
0x27_487_74C,
0x34_B0B_CB5,
0x39_1C0_CB3,
0x4E_D8A_A4A,
0x5B_9CC_A4F,
0x68_2E6_FF3,
0x74_8F8_2EE,
0x78_A56_36F,
0x84_C87_814,
0x8C_C70_208,
0x90_BEF_FFA,
0xA4_506_CEB,
0xBE_F9A_3F7,
0xC6_717_8F2,
]
lowercase__ = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def SCREAMING_SNAKE_CASE_ ( a : bytes )-> bytes:
"""simple docstring"""
lowercase__ = B'\x80' + (B'\x00' * (63 - (len(_UpperCAmelCase ) + 8) % 64))
lowercase__ = struct.pack('>Q' , (len(_UpperCAmelCase ) * 8) )
return data + padding + big_endian_integer
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> None:
"""simple docstring"""
lowercase__ = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
lowercase__ = list(struct.unpack('>16L' , _UpperCAmelCase ) )
# add 48 0-ed integers
words += [0] * 48
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
lowercase__ = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
lowercase__ = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
lowercase__ = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x100_000_000
# Compression
lowercase__ = self.ror(_UpperCAmelCase , 6 ) ^ self.ror(_UpperCAmelCase , 11 ) ^ self.ror(_UpperCAmelCase , 25 )
lowercase__ = (e & f) ^ ((~e & 0xFF_FFF_FFF) & g)
lowercase__ = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x100_000_000
lowercase__ = self.ror(_UpperCAmelCase , 2 ) ^ self.ror(_UpperCAmelCase , 13 ) ^ self.ror(_UpperCAmelCase , 22 )
lowercase__ = (a & b) ^ (a & c) ^ (b & c)
lowercase__ = (sa + maj) % 0x100_000_000
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = (
g,
f,
e,
((d + tempa) % 0x100_000_000),
c,
b,
a,
((tempa + tempa) % 0x100_000_000),
)
lowercase__ = [a, b, c, d, e, f, g, h]
# Modify final values
lowercase__ = [
((element + mutated_hash_values[index]) % 0x100_000_000)
for index, element in enumerate(self.hashes )
]
lowercase__ = ''.join([hex(_UpperCAmelCase )[2:].zfill(8 ) for value in self.hashes] )
def SCREAMING_SNAKE_CASE_ ( self : int , a : int , a : int )-> int:
"""simple docstring"""
return 0xFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations)
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> None:
"""simple docstring"""
import hashlib
lowercase__ = bytes('Test String' , 'utf-8' )
self.assertEqual(SHAaaa(_UpperCAmelCase ).hash , hashlib.shaaaa(_UpperCAmelCase ).hexdigest() )
def __UpperCamelCase () -> Any:
import doctest
doctest.testmod()
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
lowercase__ = parser.parse_args()
lowercase__ = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
lowercase__ = f.read()
else:
lowercase__ = bytes(lowerCAmelCase__ , 'utf-8' )
print(SHAaaa(lowerCAmelCase__ ).hash )
if __name__ == "__main__":
main()
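# Example invocations (hypothetical shell session):
#   python sha256.py -s 'Test String'
#   python sha256.py -f some_file.bin
# Either hashes the --string argument or, when --file is given, the file's raw bytes.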
| 714
|
class SCREAMING_SNAKE_CASE : # Public class to implement a graph
def __init__( self : int , a : int , a : int , a : list[list[bool]] )-> None:
"""simple docstring"""
lowercase__ = row
lowercase__ = col
lowercase__ = graph
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : int , a : int , a : list[list[bool]] )-> bool:
"""simple docstring"""
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : int , a : int , a : list[list[bool]] )-> None:
"""simple docstring"""
lowercase__ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
lowercase__ = [-1, 0, 1, -1, 1, -1, 0, 1]
lowercase__ = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , a ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , a )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> int: # And finally, count all islands.
"""simple docstring"""
lowercase__ = [[False for j in range(self.COL )] for i in range(self.ROW )]
lowercase__ = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(a , a , a )
count += 1
return count
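# A self-contained cross-check of the same 8-connected island count
# (hypothetical helper using an iterative flood fill instead of recursion):
def count_islands_iterative(grid):
    rows, cols = len(grid), len(grid[0])
    seen = [[False] * cols for _ in range(rows)]
    count = 0
    for si in range(rows):
        for sj in range(cols):
            if grid[si][sj] and not seen[si][sj]:
                count += 1  # new island found; flood-fill all connected cells
                seen[si][sj] = True
                stack = [(si, sj)]
                while stack:
                    i, j = stack.pop()
                    for di in (-1, 0, 1):
                        for dj in (-1, 0, 1):
                            ni, nj = i + di, j + dj
                            if 0 <= ni < rows and 0 <= nj < cols and grid[ni][nj] and not seen[ni][nj]:
                                seen[ni][nj] = True
                                stack.append((ni, nj))
    return count

assert count_islands_iterative([
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
]) == 5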
| 45
| 0
|
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE (metaclass=__lowerCAmelCase ):
_UpperCamelCase : List[str] = ['onnx']
def __init__( self : Optional[Any] , *a : Tuple , **a : int )-> int:
"""simple docstring"""
requires_backends(self , ['onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *a : Optional[Any] , **a : Optional[int] )-> int:
"""simple docstring"""
requires_backends(cls , ['onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *a : List[str] , **a : List[Any] )-> List[str]:
"""simple docstring"""
requires_backends(cls , ['onnx'] )
| 715
|
from string import ascii_uppercase
lowercase_ = {str(ord(c) - 55): c for c in ascii_uppercase}
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
    if isinstance(num , float ):
raise TypeError('int() can\'t convert non-string with explicit base' )
if num < 0:
raise ValueError('parameter must be positive int' )
    if isinstance(base , str ):
raise TypeError('\'str\' object cannot be interpreted as an integer' )
    if isinstance(base , float ):
raise TypeError('\'float\' object cannot be interpreted as an integer' )
if base in (0, 1):
raise ValueError('base must be >= 2' )
if base > 36:
raise ValueError('base must be <= 36' )
lowercase__ = ''
lowercase__ = 0
lowercase__ = 0
while div != 1:
        lowercase__ , lowercase__ = divmod(num , base )
if base >= 11 and 9 < mod < 36:
            lowercase__ = ALPHABET_VALUES[str(mod )]
else:
            lowercase__ = str(mod )
new_value += actual_value
lowercase__ = num // base
lowercase__ = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
            new_value += str(div )
return str(new_value[::-1] )
return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 45
| 0
|
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Any:
    if not isinstance(number , int ):
lowercase__ = F"""Input value of [number={number}] must be an integer"""
raise TypeError(_lowerCamelCase )
if number < 0:
return False
lowercase__ = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
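# The predicate above recognises automorphic numbers: non-negative integers n
# whose square ends in n itself (5 -> 25, 76 -> 5776). An equivalent modular
# sketch (hypothetical name):
def is_automorphic(n: int) -> bool:
    return n >= 0 and (n * n) % 10 ** len(str(n)) == n

assert is_automorphic(76) and is_automorphic(0) and not is_automorphic(7)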
| 716
|
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , a : Any , a : Optional[int]=13 , a : Tuple=30 , a : Union[str, Any]=2 , a : List[str]=3 , a : Dict=True , a : List[str]=True , a : List[Any]=32 , a : List[str]=5 , a : Optional[int]=4 , a : List[str]=37 , a : Dict="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : int=10 , a : List[str]=0.02 , a : int=None , a : List[str]=2 , )-> Dict:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = scope
lowercase__ = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = num_patches + 1
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[Any]:
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : List[Any] , a : List[str] , a : Dict )-> Optional[Any]:
"""simple docstring"""
lowercase__ = ViTModel(config=a )
model.to(a )
model.eval()
lowercase__ = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : int , a : Optional[Any] , a : int , a : Tuple )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = ViTForMaskedImageModeling(config=a )
model.to(a )
model.eval()
lowercase__ = model(a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = ViTForMaskedImageModeling(a )
model.to(a )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : List[str] , a : int , a : List[Any] )-> str:
"""simple docstring"""
lowercase__ = self.type_sequence_label_size
lowercase__ = ViTForImageClassification(a )
model.to(a )
model.eval()
lowercase__ = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = ViTForImageClassification(a )
model.to(a )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Any = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Union[str, Any] = (
{'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification}
if is_torch_available()
else {}
)
_UpperCamelCase : int = True
_UpperCamelCase : int = False
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : Dict = False
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
"""simple docstring"""
lowercase__ = ViTModelTester(self )
lowercase__ = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
pass
    def test_model_common_attributes( self )-> Union[str, Any]:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self )-> Optional[int]:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self )-> Union[str, Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_image_modeling( self )-> Tuple:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self )-> int:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self )-> List[Any]:
        """simple docstring"""
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img() -> str:
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor( self )-> List[Any]:
        """simple docstring"""
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self )-> Union[str, Any]:
        """simple docstring"""
        model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_interpolate_pos_encoding( self )-> List[str]:
        """simple docstring"""
        model = ViTModel.from_pretrained('facebook/dino-vits8' ).to(torch_device )
        image_processor = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=480 )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values , interpolate_pos_encoding=True )
        # verify the logits
        expected_shape = torch.Size((1, 3_601, 384) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ) )
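        # Note (added, hedged): `interpolate_pos_encoding=True` is what allows the 480x480
        # input here: a patch size of 8 yields (480 // 8) ** 2 = 3_600 patches plus one CLS
        # token, i.e. the 3_601-token sequence checked above, even though the position
        # embeddings were trained at a lower resolution.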
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16( self )-> str:
        """simple docstring"""
        model = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.float16 , device_map='auto' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values )
| 45
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path ) -> List[str]:
    with open(path , 'rb' ) as f:
        im = Image.open(f )
        return im.convert('RGB' )
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None , metadata={
            'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
        } , )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    train_dir: Optional[str] = field(default=None , metadata={'help': 'A folder containing the training data.'} )
    validation_dir: Optional[str] = field(default=None , metadata={'help': 'A folder containing the validation data.'} )
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    def __post_init__( self )-> None:
        """simple docstring"""
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                'You must specify either a dataset name from the hub or a train and/or validation directory.' )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
    model_type: Optional[str] = field(
        default=None , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES )} , )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    image_processor_name: str = field(default=None , metadata={'help': 'Name or path of preprocessor config.'} )
    use_auth_token: bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def collate_fn(examples ) -> int:
    pixel_values = torch.stack([example['pixel_values'] for example in examples] )
    labels = torch.tensor([example['labels'] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
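# Usage sketch (added, hedged -- the example rows are illustrative, not from the script):
# given two dataset rows of the form {'pixel_values': Tensor[3, 224, 224], 'labels': 7},
# collate_fn stacks them into {'pixel_values': Tensor[2, 3, 224, 224], 'labels': tensor([7, 7])}.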
def main() -> Optional[Any]:
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_image_classification' , model_args , data_args )
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
    logger.info(F"""Training/evaluation parameters {training_args}""" )
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='image-classification' , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files['train'] = os.path.join(data_args.train_dir , '**' )
        if data_args.validation_dir is not None:
            data_files['validation'] = os.path.join(data_args.validation_dir , '**' )
        dataset = load_dataset(
            'imagefolder' , data_files=data_files , cache_dir=model_args.cache_dir , task='image-classification' , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split )
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id , id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load('accuracy' )
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task='image-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
lowercase__ = image_processor.size["shortest_edge"]
else:
lowercase__ = (image_processor.size["height"], image_processor.size["width"])
lowercase__ = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
lowercase__ = Compose(
[
RandomResizedCrop(_lowerCamelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
lowercase__ = Compose(
[
Resize(_lowerCamelCase ),
CenterCrop(_lowerCamelCase ),
ToTensor(),
normalize,
] )
def train_transforms(_SCREAMING_SNAKE_CASE ):
lowercase__ = [
_train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch["image"]
]
return example_batch
def val_transforms(_SCREAMING_SNAKE_CASE ):
lowercase__ = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch["image"]]
return example_batch
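    # Note (added, hedged): `set_transform` below applies these functions lazily, so each
    # image is decoded and augmented only when a batch is actually indexed, rather than
    # materializing an augmented copy of the whole dataset up front.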
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError('--do_train requires a train dataset' )
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms )
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError('--do_eval requires a validation dataset' )
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms )
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=dataset['train'] if training_args.do_train else None , eval_dataset=dataset['validation'] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=image_processor , data_collator=collate_fn , )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics('train' , train_result.metrics )
        trainer.save_metrics('train' , train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 717
|
def stooge_sort(arr ) -> List[Any]:
    stooge(arr , 0 , len(arr ) - 1 )
    return arr
def stooge(arr , i , h ) -> int:
    if i >= h:
        return
    # If the first element is larger than the last then swap them
    if arr[i] > arr[h]:
        arr[i] , arr[h] = arr[h] , arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = int((h - i + 1) / 3 )
        # Recursively sort the first 2/3 of the elements
        stooge(arr , i , h - t )
        # Recursively sort the last 2/3 of the elements
        stooge(arr , i + t , h )
        # Recursively sort the first 2/3 of the elements again
        stooge(arr , i , h - t )
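# Usage sketch (added, hedged): stooge_sort([2, 4, 5, 3, 1]) returns [1, 2, 3, 4, 5].
# The three overlapping 2/3-length recursive calls give a running time of
# O(n ** (log 3 / log 1.5)) ~ O(n ** 2.71), so this is a teaching example, not a practical sort.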
if __name__ == "__main__":
lowercase_ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase_ = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
| 45
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE (PretrainedConfig ):
    model_type = 'falcon'
    keys_to_ignore_at_inference = ['past_key_values']
    def __init__( self , vocab_size=65_024 , hidden_size=4_544 , num_hidden_layers=32 , num_attention_heads=71 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , hidden_dropout=0.0 , attention_dropout=0.0 , num_kv_heads=None , alibi=False , new_decoder_architecture=False , multi_query=True , parallel_attn=True , bias=False , bos_token_id=11 , eos_token_id=11 , **kwargs , )-> Any:
        """simple docstring"""
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('n_embed' , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @property
    def head_dim( self )-> Dict:
        """simple docstring"""
        return self.hidden_size // self.num_attention_heads
    @property
    def rotary( self )-> Any:
        """simple docstring"""
        return not self.alibi
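# Usage sketch (added, hedged -- the class name is this file's own placeholder):
# config = SCREAMING_SNAKE_CASE()
# config.head_dim  # 4_544 // 71 == 64 with the defaults above
# config.rotary    # True, because `alibi` defaults to False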
| 718
|
from scipy.stats import spearmanr
import datasets
lowercase_ = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
lowercase_ = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
lowercase_ = R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE (datasets.Metric ):
    def _info( self )-> Tuple:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
    def _compute( self , predictions , references , return_pvalue=False )-> Optional[int]:
        """simple docstring"""
        results = spearmanr(references , predictions )
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
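# Minimal sketch (added, hedged) of what this metric wraps: scipy ranks both sequences
# and computes Pearson correlation on the ranks.
# from scipy.stats import spearmanr
# rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])  # rho == -0.7, as in the docstring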
| 45
| 0
|
import math
def is_prime(number ) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )
def next_prime(value , factor=1 , **kwargs ) -> Any:
    value = factor * value
    first_value_val = value
    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1 , **kwargs )
    return value
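# Usage sketch (added, hedged): next_prime(14) steps 14 -> 15 -> 16 -> 17 and returns 17;
# if the input is already prime the loop never runs, so the equality check recurses on
# value + 1 to guarantee the result is strictly larger than the input.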
| 719
|
def solution(length = 50 ) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
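# Worked example (added): solution(7) == 17, matching the F(7) = 17 figure quoted in
# Project Euler problem 114 (red blocks of length >= 3 separated by at least one grey square).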
if __name__ == "__main__":
print(f'''{solution() = }''')
| 45
| 0
|
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name ) -> Any:
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError('Model not supported' )
    repo_id = 'huggingface/label-files'
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = 'speech-commands-v2-id2label.json'
    else:
        config.num_labels = 527
        filename = 'audioset-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(name ) -> int:
if "module.v" in name:
lowercase__ = name.replace('module.v' , 'audio_spectrogram_transformer' )
if "cls_token" in name:
lowercase__ = name.replace('cls_token' , 'embeddings.cls_token' )
if "dist_token" in name:
lowercase__ = name.replace('dist_token' , 'embeddings.distillation_token' )
if "pos_embed" in name:
lowercase__ = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
lowercase__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
# transformer blocks
if "blocks" in name:
lowercase__ = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
lowercase__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowercase__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowercase__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowercase__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowercase__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowercase__ = name.replace('mlp.fc2' , 'output.dense' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
lowercase__ = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' )
# classifier head
if "module.mlp_head.0" in name:
lowercase__ = name.replace('module.mlp_head.0' , 'classifier.layernorm' )
if "module.mlp_head.1" in name:
lowercase__ = name.replace('module.mlp_head.1' , 'classifier.dense' )
return name
def convert_state_dict(orig_state_dict , config ) -> Dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[3] )
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[F"""audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"""] = val[:dim, :]
                orig_state_dict[F"""audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"""] = val[dim : dim * 2, :]
                orig_state_dict[F"""audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[F"""audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"""] = val[:dim]
                orig_state_dict[F"""audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"""] = val[dim : dim * 2]
                orig_state_dict[F"""audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"""] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
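# Note (added, hedged): the original checkpoint stores the query/key/value projections as a
# single fused "qkv" tensor of shape (3 * hidden_size, hidden_size); the slices above carve it
# into the three separate projection weights/biases that the Hugging Face layout expects.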
def remove_keys(state_dict ) -> List[Any]:
    ignore_keys = [
        'module.v.head.weight',
        'module.v.head.bias',
        'module.v.head_dist.weight',
        'module.v.head_dist.bias',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ) -> int:
    config = get_audio_spectrogram_transformer_config(model_name )
lowercase__ = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )
    # remove some keys
    remove_keys(state_dict )
    # rename some keys
    new_state_dict = convert_state_dict(state_dict , config )
    # load 🤗 model
    model = ASTForAudioClassification(config )
    model.eval()
    model.load_state_dict(new_state_dict )
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if 'speech-commands' not in model_name else -6.845978
    std = 4.5689974 if 'speech-commands' not in model_name else 5.5654526
    max_length = 1024 if 'speech-commands' not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean , std=std , max_length=max_length )
    if "speech-commands" in model_name:
        dataset = load_dataset('speech_commands' , 'v0.02' , split='validation' )
        waveform = dataset[0]['audio']['array']
    else:
        filepath = hf_hub_download(
            repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , )
        waveform , _ = torchaudio.load(filepath )
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform , sampling_rate=16000 , return_tensors='pt' )
    # forward pass
    outputs = model(**inputs )
    logits = outputs.logits
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602] )
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917] )
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843] )
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413] )
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470] )
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984] )
    else:
        raise ValueError('Unknown model name' )
    if not torch.allclose(logits[0, :3] , expected_slice , atol=1E-4 ):
        raise ValueError('Logits don\'t match' )
print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"""Saving feature extractor to {pytorch_dump_folder_path}""" )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('Pushing model and feature extractor to the hub...' )
        model.push_to_hub(F"""MIT/{model_name}""" )
        feature_extractor.push_to_hub(F"""MIT/{model_name}""" )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowercase_ = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 720
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
model_classes = {
    """b0""": efficientnet.EfficientNetB0,
    """b1""": efficientnet.EfficientNetB1,
    """b2""": efficientnet.EfficientNetB2,
    """b3""": efficientnet.EfficientNetB3,
    """b4""": efficientnet.EfficientNetB4,
    """b5""": efficientnet.EfficientNetB5,
    """b6""": efficientnet.EfficientNetB6,
    """b7""": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1_408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1_536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1_792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2_304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2_560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def get_efficientnet_config(model_name ) -> str:
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]['hidden_dim']
    config.width_coefficient = CONFIG_MAP[model_name]['width_coef']
    config.depth_coefficient = CONFIG_MAP[model_name]['depth_coef']
    config.image_size = CONFIG_MAP[model_name]['image_size']
    config.dropout_rate = CONFIG_MAP[model_name]['dropout_rate']
    config.depthwise_padding = CONFIG_MAP[model_name]['dw_padding']
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img() -> Tuple:
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
def convert_image_processor(model_name ) -> Optional[Any]:
    size = CONFIG_MAP[model_name]['image_size']
    preprocessor = EfficientNetImageProcessor(
        size={'height': size, 'width': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=False , )
    return preprocessor
def rename_keys(original_param_names ) -> Tuple:
    block_names = [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
    rename_keys = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
lowercase__ = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = 'efficientnet.' + item[1]
    key_mapping['predictions/kernel:0'] = 'classifier.weight'
    key_mapping['predictions/bias:0'] = 'classifier.bias'
    return key_mapping
def replace_params(hf_params , tf_params , key_mapping ) -> Tuple:
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
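# Note (added, hedged): TensorFlow stores conv kernels as (H, W, in, out) and depthwise
# kernels as (H, W, channels, multiplier); the permutes above reorder them to PyTorch's
# (out, in, H, W) and (channels, multiplier, H, W) layouts, and dense kernels are simply
# transposed.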
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name , pytorch_dump_folder_path , save_model , push_to_hub ) -> Tuple:
    original_model = model_classes[model_name](
        include_top=True , weights='imagenet' , input_tensor=None , input_shape=None , pooling=None , classes=1000 , classifier_activation='softmax' , )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )
    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print('Converting parameters...' )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params , tf_params , key_mapping )
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img() , return_tensors='pt' )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]['image_size']
    img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x , axis=0 )
    original_logits = original_model.predict(x )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits , hf_logits , atol=1E-3 ), "The predicted logits are not the same."
    print('Model outputs match!' )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model and image processor to hub
        print(F"""Pushing converted {model_name} to the hub...""" )
        model_name = F"""efficientnet-{model_name}"""
        preprocessor.push_to_hub(model_name )
        hf_model.push_to_hub(model_name )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
lowercase_ = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 45
| 0
|
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , )-> Union[str, Any]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self )-> Union[str, Any]:
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self )-> Any:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self )-> str:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp( self )-> int:
        """simple docstring"""
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self )
    @slow
    def test_model_from_pretrained( self )-> Tuple:
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_masked_lm( self )-> Union[str, Any]:
        """simple docstring"""
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=True )
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.int32 )
        output = model(input_ids )[0]
        expected_shape = [1, 11, 50_265]
        self.assertEqual(list(output.shape ) , expected_shape )
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_no_head( self )-> int:
        """simple docstring"""
        model = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=True )
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.int32 )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 721
|
import argparse
import json
import subprocess
def get_runner_status(target_runners , token ) -> Optional[int]:
    offline_runners = []
    cmd = (
        F"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
        ' https://api.github.com/repos/huggingface/transformers/actions/runners'
    )
    output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
    o = output.stdout.decode('utf-8' )
    status = json.loads(o )
    runners = status['runners']
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open('offline_runners.txt' , 'w' ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        failed = '\n'.join([x['name'] for x in offline_runners] )
        raise ValueError(F"""The following runners are offline:\n{failed}""" )
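# Note (added, hedged): the curl command above calls the GitHub Actions self-hosted
# runners API; each entry of `status['runners']` carries at least a `name` and a
# `status` field ('online'/'offline'), which is all this check relies on.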
if __name__ == "__main__":
    def list_str(values ) -> str:
        return values.split(',' )
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
lowercase_ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 45
| 0
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , scope=None , range_bbox=1_000 , )-> Union[str, Any]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs( self )-> List[Any]:
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
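    # Note (added, hedged): the coordinate swaps in prepare_config_and_inputs above enforce
    # x0 <= x1 and y0 <= y1 on every random (x0, y0, x1, y1) box, since layout-aware models
    # expect top-left/bottom-right ordering of bounding boxes.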
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> str:
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels )-> List[str]:
        """simple docstring"""
        model = LiltModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_token_classification( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels )-> Optional[Any]:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels )-> List[Any]:
        """simple docstring"""
        model = LiltForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE_ ( self : int )-> List[Any]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (_lowercase , _lowercase , _lowercase , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCamelCase : str = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : List[str] = False
def SCREAMING_SNAKE_CASE_ ( self : Any , a : List[Any] , a : Optional[Any] , a : Any , a : Dict , a : Union[str, Any] )-> int:
"""simple docstring"""
return True
def SCREAMING_SNAKE_CASE_ ( self : int )-> Optional[int]:
"""simple docstring"""
lowercase__ = LiltModelTester(self )
lowercase__ = ConfigTester(self , config_class=A_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Tuple:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase__ = type
self.model_tester.create_and_check_model(*A_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Any:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[Any]:
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = LiltModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_torch
@slow
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : int )-> int:
"""simple docstring"""
lowercase__ = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(A_ )
lowercase__ = torch.tensor([[1, 2]] , device=A_ )
lowercase__ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=A_ )
# forward pass
with torch.no_grad():
lowercase__ = model(input_ids=A_ , bbox=A_ )
lowercase__ = torch.Size([1, 2, 768] )
lowercase__ = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=A_ , )
self.assertEqual(outputs.last_hidden_state.shape , A_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , A_ , atol=1E-3 ) )
| 700
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Tuple = 'ClapFeatureExtractor'
_UpperCamelCase : Union[str, Any] = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self : List[Any] , a : int , a : str )-> Any:
"""simple docstring"""
super().__init__(a , a )
def __call__( self : Any , a : Tuple=None , a : Optional[int]=None , a : int=None , **a : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = kwargs.pop('sampling_rate' , a )
if text is None and audios is None:
raise ValueError('You have to specify either text or audios. Both cannot be none.' )
if text is not None:
lowercase__ = self.tokenizer(a , return_tensors=a , **a )
if audios is not None:
lowercase__ = self.feature_extractor(
a , sampling_rate=a , return_tensors=a , **a )
if text is not None and audios is not None:
lowercase__ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
def SCREAMING_SNAKE_CASE_ ( self : str , *a : Dict , **a : int )-> Optional[int]:
"""simple docstring"""
return self.tokenizer.batch_decode(*a , **a )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , *a : int , **a : Dict )-> Dict:
"""simple docstring"""
return self.tokenizer.decode(*a , **a )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.tokenizer.model_input_names
lowercase__ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
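# Illustrative usage of the processor above (ClapProcessor upstream; the
# checkpoint name is an assumption, shown for context only):
#   processor = ClapProcessor.from_pretrained('laion/clap-htsat-unfused')
#   inputs = processor(text=['a dog barking'], audios=waveform, sampling_rate=48_000, return_tensors='pt')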
| 45
| 0
|
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Any:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return model
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=10 , )
return model
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> int:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , )
lowercase__ = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return vqvae, unet
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
"""simple docstring"""
lowercase__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowercase__ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
lowercase__ = DDPMScheduler()
lowercase__ = AudioDiffusionPipeline(vqvae=a , unet=self.dummy_unet , mel=a , scheduler=a )
lowercase__ = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = torch.Generator(device=a ).manual_seed(42 )
lowercase__ = pipe(generator=a , steps=4 )
lowercase__ = output.audios[0]
lowercase__ = output.images[0]
lowercase__ = torch.Generator(device=a ).manual_seed(42 )
lowercase__ = pipe(generator=a , steps=4 , return_dict=a )
lowercase__ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowercase__ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowercase__ = np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:10]
lowercase__ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowercase__ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
lowercase__ = DDIMScheduler()
lowercase__ = self.dummy_vqvae_and_unet
lowercase__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=a , scheduler=a )
lowercase__ = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
np.random.seed(0 )
lowercase__ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowercase__ = torch.Generator(device=a ).manual_seed(42 )
lowercase__ = pipe(raw_audio=a , generator=a , start_step=5 , steps=10 )
lowercase__ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowercase__ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowercase__ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowercase__ = self.dummy_unet_condition
lowercase__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=a , mel=a , scheduler=a )
lowercase__ = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
np.random.seed(0 )
lowercase__ = torch.rand((1, 1, 10) )
lowercase__ = pipe(generator=a , encoding=a )
lowercase__ = output.images[0]
lowercase__ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowercase__ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Any )-> str:
"""simple docstring"""
lowercase__ = torch_device
lowercase__ = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' )
lowercase__ = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = torch.Generator(device=a ).manual_seed(42 )
lowercase__ = pipe(generator=a )
lowercase__ = output.audios[0]
lowercase__ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowercase__ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowercase__ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 701
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
lowercase_ = None
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
lowercase_ = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
lowercase_ = {
"""moussaKam/mbarthez""": 1_024,
"""moussaKam/barthez""": 1_024,
"""moussaKam/barthez-orangesum-title""": 1_024,
}
lowercase_ = """▁"""
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Dict = VOCAB_FILES_NAMES
_UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[Any] = ['input_ids', 'attention_mask']
_UpperCamelCase : int = BarthezTokenizer
def __init__( self : List[Any] , a : Union[str, Any]=None , a : Optional[Any]=None , a : Dict="<s>" , a : Union[str, Any]="</s>" , a : List[str]="</s>" , a : Optional[Any]="<s>" , a : int="<unk>" , a : str="<pad>" , a : Optional[int]="<mask>" , **a : Union[str, Any] , )-> Tuple:
"""simple docstring"""
lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
a , tokenizer_file=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , **a , )
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : str , a : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
copyfile(self.vocab_file , a )
return (out_vocab_file,)
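# Special-token layout built above (RoBERTa-style): a single sequence becomes
# `<s> A </s>`, a pair becomes `<s> A </s></s> B </s>`, and the token type ids
# are all zeros in both cases.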
| 45
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase_ = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
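# The module above follows the `_LazyModule` pattern: heavy submodules are only
# imported when one of their attributes is first accessed. A minimal sketch of
# the idea (hypothetical module name, for illustration only):
#
#   import importlib
#
#   class LazyModule:
#       def __init__(self, name):
#           self._name, self._mod = name, None
#
#       def __getattr__(self, attr):
#           if self._mod is None:
#               self._mod = importlib.import_module(self._name)
#           return getattr(self._mod, attr)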
| 702
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : List[Any] = StableDiffusionSAGPipeline
_UpperCamelCase : str = TEXT_TO_IMAGE_PARAMS
_UpperCamelCase : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
lowercase__ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
lowercase__ = CLIPTextModel(a )
lowercase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowercase__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[int] , a : Any=0 )-> Union[str, Any]:
"""simple docstring"""
if str(a ).startswith('mps' ):
lowercase__ = torch.manual_seed(a )
else:
lowercase__ = torch.Generator(device=a ).manual_seed(a )
lowercase__ = {
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : str )-> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
lowercase__ = sag_pipe.to(a )
sag_pipe.set_progress_bar_config(disable=a )
lowercase__ = '.'
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowercase__ = sag_pipe.to(a )
sag_pipe.set_progress_bar_config(disable=a )
lowercase__ = '.'
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowercase__ = sag_pipe.to(a )
sag_pipe.set_progress_bar_config(disable=a )
lowercase__ = '.'
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , width=768 , height=512 , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
lowercase__ = output.images
assert image.shape == (1, 512, 768, 3)
| 45
| 0
|
from math import ceil, sqrt
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 1000000 ) -> Optional[int]:
lowercase__ = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
lowercase__ = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
lowercase__ = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
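# Brute-force cross-check for small limits (my own sketch, not part of the
# original): a square lamina with outer width o and hole width h of the same
# parity uses o*o - h*h tiles.
def _brute_force_laminae(limit):
    count = 0
    outer = 3
    while 4 * outer - 4 <= limit:  # thinnest lamina (hole = outer - 2) uses 4*outer - 4 tiles
        for hole in range(outer - 2, 0, -2):
            if outer * outer - hole * hole <= limit:
                count += 1
            else:
                break
        outer += 1
    return count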
if __name__ == "__main__":
print(f'''{solution() = }''')
| 703
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""facebook/deit-base-distilled-patch16-224""": (
"""https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"""
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Any = 'deit'
def __init__( self : Any , a : Union[str, Any]=768 , a : Optional[Any]=12 , a : Union[str, Any]=12 , a : Optional[int]=3_072 , a : Optional[int]="gelu" , a : Optional[Any]=0.0 , a : List[Any]=0.0 , a : int=0.02 , a : List[str]=1E-1_2 , a : Optional[int]=224 , a : Tuple=16 , a : List[Any]=3 , a : List[str]=True , a : Any=16 , **a : Union[str, Any] , )-> int:
"""simple docstring"""
super().__init__(**a )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = qkv_bias
lowercase__ = encoder_stride
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : List[Any] = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE_ ( self : int )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE_ ( self : Any )-> float:
"""simple docstring"""
return 1E-4
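# The ONNX config above declares a single `pixel_values` input with dynamic
# batch / channel / height / width axes and uses an absolute tolerance of
# 1e-4 when validating the exported model against the PyTorch outputs.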
| 45
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 704
|
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 ) -> List[Any]:
lowercase__ = None
if token is not None:
lowercase__ = {'Accept': 'application/vnd.github+json', 'Authorization': F"""Bearer {token}"""}
# The id of a workflow (not of a workflow run)
lowercase__ = '636036'
lowercase__ = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
lowercase__ = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
return result["workflow_runs"]
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
lowercase__ = get_daily_ci_runs(_SCREAMING_SNAKE_CASE )
lowercase__ = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
lowercase__ = workflow_run['id']
break
return workflow_run_id
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
lowercase__ = get_last_daily_ci_runs(_SCREAMING_SNAKE_CASE )
if workflow_run_id is not None:
lowercase__ = get_artifacts_links(worflow_run_id=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
lowercase__ = artifacts_links[artifact_name]
download_artifact(
artifact_name=_SCREAMING_SNAKE_CASE , artifact_url=_SCREAMING_SNAKE_CASE , output_dir=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
get_last_daily_ci_artifacts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase__ = {}
for artifact_name in artifact_names:
lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , F"""{artifact_name}.zip""" )
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
lowercase__ = {}
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z:
for filename in z.namelist():
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
# read the file
with z.open(_SCREAMING_SNAKE_CASE ) as f:
lowercase__ = f.read().decode('UTF-8' )
return results
| 45
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> List[Any]:
lowercase__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowercase__ = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Dict:
for i in range(config.num_hidden_layers ):
if base_model:
lowercase__ = """"""
else:
lowercase__ = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
lowercase__ = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[
: config.hidden_size, :
]
lowercase__ = in_proj_bias[: config.hidden_size]
lowercase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ = in_proj_weight[
-config.hidden_size :, :
]
lowercase__ = in_proj_bias[-config.hidden_size :]
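# The fused qkv weight has shape (3 * hidden_size, hidden_size); the three
# row-blocks sliced above are, in order, the query, key and value projections.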
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[Any]:
lowercase__ = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
lowercase__ = dct.pop(_SCREAMING_SNAKE_CASE )
lowercase__ = val
def __UpperCamelCase () -> Optional[int]:
lowercase__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase__ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ) -> Dict:
lowercase__ = ViTConfig()
# patch_size
if model_name[-1] == "8":
lowercase__ = 8
# set labels if required
if not base_model:
lowercase__ = 1000
lowercase__ = """huggingface/label-files"""
lowercase__ = """imagenet-1k-id2label.json"""
lowercase__ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase__ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
lowercase__ = 384
lowercase__ = 1536
lowercase__ = 12
lowercase__ = 6
# load original model from torch hub
lowercase__ = torch.hub.load('facebookresearch/dino:main' , _SCREAMING_SNAKE_CASE )
original_model.eval()
# load state_dict of original model, remove and rename some keys
lowercase__ = original_model.state_dict()
if base_model:
remove_classification_head_(_SCREAMING_SNAKE_CASE )
lowercase__ = create_rename_keys(_SCREAMING_SNAKE_CASE , base_model=_SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# load HuggingFace model
if base_model:
lowercase__ = ViTModel(_SCREAMING_SNAKE_CASE , add_pooling_layer=_SCREAMING_SNAKE_CASE ).eval()
else:
lowercase__ = ViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by ViTImageProcessor
lowercase__ = ViTImageProcessor()
lowercase__ = image_processor(images=prepare_img() , return_tensors='pt' )
lowercase__ = encoding["""pixel_values"""]
lowercase__ = model(_SCREAMING_SNAKE_CASE )
if base_model:
lowercase__ = original_model(_SCREAMING_SNAKE_CASE )
assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
lowercase__ = original_model(_SCREAMING_SNAKE_CASE )
assert logits.shape == outputs.logits.shape
assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
lowercase_ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
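# Example invocation (script filename and output path are placeholders):
#   python convert_dino_to_vit.py --model_name dino_vits8 --pytorch_dump_folder_path ./dino_vits8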
| 705
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowercase_ = False
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe.dual_guided(
prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a )
lowercase__ = VersatileDiffusionPipeline.from_pretrained(a , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = generator.manual_seed(0 )
lowercase__ = pipe.dual_guided(
prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[str]:
"""simple docstring"""
lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = 'cyberpunk 2077'
lowercase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe.dual_guided(
prompt=a , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowercase__ = 'A painting of a squirrel eating a burger '
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe.text_to_image(
prompt=a , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowercase__ = pipe.image_variation(a , generator=a , output_type='numpy' ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 45
| 0
|
import unittest
from knapsack import greedy_knapsack as kp
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Tuple:
"""simple docstring"""
lowercase__ = [10, 20, 30, 40, 50, 60]
lowercase__ = [2, 4, 6, 8, 10, 12]
lowercase__ = 100
self.assertEqual(kp.calc_profit(a_ , a_ , a_ ) , 210 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Any:
"""simple docstring"""
self.assertRaisesRegex(a_ , 'max_weight must greater than zero.' )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> int:
"""simple docstring"""
self.assertRaisesRegex(a_ , 'Weight can not be negative.' )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Optional[Any]:
"""simple docstring"""
self.assertRaisesRegex(a_ , 'Profit can not be negative.' )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Tuple:
"""simple docstring"""
self.assertRaisesRegex(a_ , 'max_weight must greater than zero.' )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
self.assertRaisesRegex(
a_ , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 706
|
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> bool:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
if len(_SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('Input list must be a non empty list' )
if len(_SCREAMING_SNAKE_CASE ) == 1:
return True
lowercase__ = series[1] - series[0]
for index in range(len(_SCREAMING_SNAKE_CASE ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
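# e.g. [2, 4, 6] (common difference 2) -> True, while [2, 4, 7] -> False.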
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> float:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
if len(_SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('Input list must be a non empty list' )
lowercase__ = 0
for val in series:
answer += val
return answer / len(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45
| 0
|
from __future__ import annotations
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float:
if days_between_payments <= 0:
raise ValueError('days_between_payments must be > 0' )
if daily_interest_rate < 0:
raise ValueError('daily_interest_rate must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return principal * daily_interest_rate * days_between_payments
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> float:
if number_of_compounding_periods <= 0:
raise ValueError('number_of_compounding_periods must be > 0' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('nominal_annual_interest_rate_percentage must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> float:
if number_of_years <= 0:
raise ValueError('number_of_years must be > 0' )
if nominal_annual_percentage_rate < 0:
raise ValueError('nominal_annual_percentage_rate must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return compound_interest(
principal , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707
|
from __future__ import annotations
import math
from collections.abc import Callable
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 100 , ) -> float:
lowercase__ = x_start
lowercase__ = fnc(_SCREAMING_SNAKE_CASE )
lowercase__ = 0.0
for _ in range(_SCREAMING_SNAKE_CASE ):
# Approximates curve as a sequence of linear lines and sums their length
lowercase__ = (x_end - x_start) / steps + xa
lowercase__ = fnc(_SCREAMING_SNAKE_CASE )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
lowercase__ = xa
lowercase__ = fxa
return length
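# Quick sanity check: for f(x) = x on [0, 1] every chord has length
# sqrt(2) / steps, so the approximation sums to sqrt(2) for any step count.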
if __name__ == "__main__":
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
return math.sin(10 * x )
print("""f(x) = sin(10 * x)""")
print("""The length of the curve from x = -10 to x = 10 is:""")
lowercase_ = 10
while i <= 100_000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
| 45
| 0
|
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> bool:
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
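# 6 (= 1 + 2 + 3) and 28 (= 1 + 2 + 4 + 7 + 14) are the first two perfect numbers.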
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
lowercase_ = int(input("""Enter number: """).strip())
print(f'''{number} is {"" if perfect(number) else "not "}a Perfect Number.''')
| 708
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 45
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['''LayoutLMv2FeatureExtractor''']
lowercase_ = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 709
|
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 50 ) -> int:
lowercase__ = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
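# different_colour_ways_number[n][t - 2] counts tilings of a length-n row that
# use at least one tile of the fixed length t (t in {2, 3, 4}); summing the
# three columns gives a Project Euler 116-style count.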
if __name__ == "__main__":
print(f'''{solution() = }''')
| 45
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowercase_ = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_UpperCamelCase : List[Any] = field(
default=UpperCAmelCase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_UpperCamelCase : int = field(
default='NER' , metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'} )
_UpperCamelCase : int = field(
default=UpperCAmelCase__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_UpperCamelCase : Optional[int] = field(default=UpperCAmelCase__ , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_UpperCamelCase : List[Any] = field(
default=UpperCAmelCase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
_UpperCamelCase : List[str] = field(
metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'} )
_UpperCamelCase : Dict = field(
default=UpperCAmelCase__ , metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} , )
_UpperCamelCase : List[Any] = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_UpperCamelCase : List[Any] = field(
default=UpperCAmelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def __UpperCamelCase () -> int:
lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
lowercase__ = import_module('tasks' )
try:
lowercase__ = getattr(UpperCAmelCase__ , model_args.task_type )
lowercase__ = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , UpperCAmelCase__ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
lowercase__ = token_classification_task.get_labels(data_args.labels )
lowercase__ = dict(enumerate(UpperCAmelCase__ ) )
lowercase__ = len(UpperCAmelCase__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase__ , idalabel=UpperCAmelCase__ , labelaid={label: i for i, label in enumerate(UpperCAmelCase__ )} , cache_dir=model_args.cache_dir , )
lowercase__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
lowercase__ = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , )
# Get datasets
lowercase__ = (
TokenClassificationDataset(
token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowercase__ = (
TokenClassificationDataset(
token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple[List[int], List[int]]:
lowercase__ = np.argmax(UpperCAmelCase__ , axis=2 )
lowercase__ , lowercase__ = preds.shape
lowercase__ = [[] for _ in range(UpperCAmelCase__ )]
lowercase__ = [[] for _ in range(UpperCAmelCase__ )]
for i in range(UpperCAmelCase__ ):
for j in range(UpperCAmelCase__ ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(_SCREAMING_SNAKE_CASE ) -> Dict:
lowercase__ , lowercase__ = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(UpperCAmelCase__ , UpperCAmelCase__ ),
"precision": precision_score(UpperCAmelCase__ , UpperCAmelCase__ ),
"recall": recall_score(UpperCAmelCase__ , UpperCAmelCase__ ),
"f1": fa_score(UpperCAmelCase__ , UpperCAmelCase__ ),
}
# Data collator
lowercase__ = DataCollatorWithPadding(UpperCAmelCase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
lowercase__ = Trainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , compute_metrics=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowercase__ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowercase__ = trainer.evaluate()
lowercase__ = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , UpperCAmelCase__ , UpperCAmelCase__ )
writer.write('%s = %s\n' % (key, value) )
results.update(UpperCAmelCase__ )
# Predict
if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, 'test_results.txt')
        if trainer.is_world_process_zero():
            with open(output_test_results_file, 'w') as writer:
                for key, value in metrics.items():
                    logger.info('  %s = %s', key, value)
                    writer.write('%s = %s\n' % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, 'test_predictions.txt')
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, 'w') as writer:
                with open(os.path.join(data_args.data_dir, 'test.txt'), 'r') as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 710
|
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(self, value_function: UNet1DModel, unet: UNet1DModel, scheduler: DDPMScheduler, env) -> None:
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_xa(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]
                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_xa(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)['prev_sample']
            # apply conditions to the trajectory (set the initial state)
            x = self.reset_xa(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, 'observations')
        obs = obs[None].repeat(batch_size, axis=0)
        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x = randn_tensor(shape, device=self.unet.device)
        x = self.reset_xa(x, conditions, self.action_dim)
        x = self.to_torch(x)
        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)
        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key='actions')
        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
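# Illustrative notes (not part of the pipeline API above):
# - Each guidance step in run_diffusion is gradient ascent on the learned value
#   estimate, scaled by the scheduler's posterior std: roughly
#   x <- x + scale * exp(0.5 * posterior_variance) * d(value)/dx, with the
#   gradient zeroed for the final timesteps and the start-state condition
#   re-applied after every update.
# - Hypothetical usage (requires a D4RL-style env and trained checkpoints,
#   which are not provided here):
#
#     pipeline = ValueGuidedRLPipeline(value_function, unet, scheduler, env)
#     action = pipeline(env.reset(), planning_horizon=32, n_guide_steps=2, scale=0.1)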
| 45
| 0
|
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, 'schedule.bin')
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
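# Minimal usage sketch (illustrative, mirroring the test fixtures below; not
# part of the helpers themselves):
#
#   model = nn.Linear(50, 50)
#   optimizer = AdamW(model.parameters(), lr=10.0)
#   scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
#   lrs = unwrap_schedule(scheduler, num_steps=10)   # one learning rate per step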
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2E-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w], lr=1E-2, eps=(1E-30, 1E-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False, )
        for _ in range(1_000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {'num_warmup_steps': 2, 'num_training_steps': 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1E-2, msg=f"""failed for {scheduler_func} in normal scheduler""", )
            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"""failed for {scheduler_func} in save and reload""")
class LambdaScheduleWrapper:
    """Wraps an lr lambda in a callable object so the schedule stays picklable."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 711
|
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)')
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("""image_data/lena.jpg""") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
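# Usage note (illustrative): Image.point applies the callable to every pixel
# channel value, so change_brightness(img, 0) leaves the image unchanged and
# large positive levels push pixels toward white (PIL clamps the result to the
# valid range). The file paths in the demo above are placeholders.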
| 45
| 0
|
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={'help': 'The output directory where the model will be written.'}, )
    encoder_model_name_or_path: str = field(
        metadata={
            'help': (
                'The encoder model checkpoint for weights initialization.'
                'Don\'t set if you want to train an encoder model from scratch.'
            )
        }, )
    decoder_model_name_or_path: str = field(
        metadata={
            'help': (
                'The decoder model checkpoint for weights initialization.'
                'Don\'t set if you want to train a decoder model from scratch.'
            )
        }, )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained encoder config name or path if not the same as encoder_model_name'} )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained decoder config name or path if not the same as decoder_model_name'} )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()
    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path, decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path, encoder_config=encoder_config, decoder_config=decoder_config, )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
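# Hypothetical invocation (the checkpoint names are illustrative, not mandated
# by this script):
#
#   python this_script.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2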
if __name__ == "__main__":
main()
| 712
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {'height': 18, 'width': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_thumbnail'))
        self.assertTrue(hasattr(image_processing, 'do_align_long_axis'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 20})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {'height': 84, 'width': 42})

    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
    @is_flaky()
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
    @is_flaky()
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
| 45
| 0
|
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []
        self.m_component = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list, u_node: int, v_node: int) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""")
def __UpperCamelCase () -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
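# Minimal usage sketch (edge weights are illustrative):
#
#   g = Graph(3)
#   g.add_edge(0, 1, 5)
#   g.add_edge(1, 2, 1)
#   g.add_edge(0, 2, 4)
#   g.boruvka()   # picks edges (1, 2) and (0, 2) for a total MST weight of 5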
| 713
|
import math
def res(x: float, y: float) -> float:
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError('This should never happen')


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = """Enter the base and the power separated by a comma: """
    xa, ya = map(int, input(prompt).split(""","""))
    xb, yb = map(int, input(prompt).split(""","""))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    resa = res(xa, ya)
    resb = res(xb, yb)
    # We check for the largest number
    if resa > resb:
        print("""Largest number is""", xa, """^""", ya)
    elif resb > resa:
        print("""Largest number is""", xb, """^""", yb)
    else:
        print("""Both are equal""")
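# Worked example (illustrative): to compare 2^10 with 3^7 without computing the
# powers, compare 10*log10(2) ~= 3.0103 with 7*log10(3) ~= 3.3398; the latter is
# larger, so 3^7 (2187) beats 2^10 (1024).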
| 45
| 0
|
from __future__ import annotations
ELECTRON_CHARGE = 1.6021E-19  # units = C
def carrier_concentration(conductivity: float, electron_conc: float, mobility: float, ) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif conductivity < 0:
raise ValueError('Conductivity cannot be negative' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative' )
elif mobility < 0:
raise ValueError('mobility cannot be negative' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
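# Worked example (illustrative numbers): leaving conductivity as the unknown,
# carrier_concentration(conductivity=0, electron_conc=1e20, mobility=0.01)
# returns ("conductivity", 0.01 * 1e20 * 1.6021e-19) ~= ("conductivity", 0.1602).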
| 714
|
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
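# Minimal usage sketch (the grid below is illustrative):
#
#   grid = [[1, 1, 0, 0],
#           [0, 0, 0, 0],
#           [0, 0, 1, 1]]
#   Matrix(3, 4, grid).count_islands()   # -> 2 (diagonal cells count as connected)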
| 45
| 0
|
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ''
    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm')
    if "layers" in name:
        name = name.replace('layers', 'encoder.stages')
    if "residual_group.blocks" in name:
        name = name.replace('residual_group.blocks', 'layers')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "q_bias" in name:
        name = name.replace('q_bias', 'query.bias')
    if "k_bias" in name:
        name = name.replace('k_bias', 'key.bias')
    if "v_bias" in name:
        name = name.replace('v_bias', 'value.bias')
    if "cpb_mlp" in name:
        name = name.replace('cpb_mlp', 'continuous_position_bias_mlp')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'patch_embed.projection')
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "conv_first" in name:
        name = name.replace('conv_first', 'first_convolution')
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace('conv_last', 'final_convolution')
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace('conv_before_upsample.0', 'conv_before_upsample')
            if "upsample.0" in name:
                name = name.replace('upsample.0', 'upsample.convolution_0')
            if "upsample.2" in name:
                name = name.replace('upsample.2', 'upsample.convolution_1')
            name = 'upsample.' + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace('upsample.0.weight', 'upsample.conv.weight')
            name = name.replace('upsample.0.bias', 'upsample.conv.bias')
        else:
            pass
    else:
        name = 'swin2sr.' + name
    return name
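# Illustrative trace of the renaming above (the key is a representative pattern,
# not taken from a real checkpoint): 'layers.0.residual_group.blocks.1.attn.proj.weight'
# becomes 'swin2sr.encoder.stages.0.layers.1.attention.output.dense.weight'.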
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # fused qkv projections are split into separate query/key/value weights
            key_split = key.split('.')
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim
            prefix = f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val
    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    if len(missing_keys) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"""Unexpected key {key} in state_dict""")
    # verify values
    url = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 126 if 'Jpeg' in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
    pixel_values = transforms(image).unsqueeze(0)
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)
    outputs = model(pixel_values)
    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]])
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]])
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]])
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]])
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]])
    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1E-3)
    print('Looks ok!')
    url_to_name = {
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
            'swin2SR-classical-sr-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
            'swin2SR-classical-sr-x4-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
            'swin2SR-compressed-sr-x4-48'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
            'swin2SR-lightweight-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
            'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
        ),
    }
    model_name = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving image processor to {pytorch_dump_folder_path}""")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub(f"""caidas/{model_name}""")
        processor.push_to_hub(f"""caidas/{model_name}""")
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
lowercase_ = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 715
|
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    if isinstance(num, float):
        raise TypeError('int() can\'t convert non-string with explicit base')
    if num < 0:
        raise ValueError('parameter must be positive int')
    if isinstance(base, str):
        raise TypeError('\'str\' object cannot be interpreted as an integer')
    if isinstance(base, float):
        raise TypeError('\'float\' object cannot be interpreted as an integer')
    if base in (0, 1):
        raise ValueError('base must be >= 2')
    if base > 36:
        raise ValueError('base must be <= 36')
    new_value = ''
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
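# Worked example (illustrative): decimal_to_any(255, 16) emits 'F' for each
# divmod step (255 -> (15, 15), then 15 -> (0, 15)) and returns 'FF';
# decimal_to_any(7, 2) returns '111'.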
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 45
| 0
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = 'facebook/mbart-large-en-ro'
    src_text = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    expected_src_tokens = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='en_XX', tgt_lang='ro_RO')
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 250_020 )
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ['this is gunna be a long sentence ' * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [250_026, 250_001] )
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors='pt')
        batch['decoder_input_ids'] = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors='pt', )
        batch['decoder_input_ids'] = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors='pt')
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors='pt')
        labels = targets['input_ids']
        batch['decoder_input_ids'] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            'A test', return_tensors='pt', src_lang='en_XX', tgt_lang='ar_AR')
        self.assertEqual(
            nested_simplify(inputs), {
# A, test, EOS, en_XX
'input_ids': [[62, 3_034, 2, 250_004]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 250_001,
} , )
| 716
|
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, encoder_stride=2, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(a )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ = model(**a )
# verify the logits
lowercase__ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : int )-> List[str]:
"""simple docstring"""
lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' ).to(a )
lowercase__ = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=480 )
lowercase__ = prepare_img()
lowercase__ = image_processor(images=a , return_tensors='pt' )
lowercase__ = inputs.pixel_values.to(a )
# forward pass
with torch.no_grad():
lowercase__ = model(a , interpolate_pos_encoding=a )
# verify the logits
lowercase__ = torch.Size((1, 3_601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , a )
lowercase__ = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : str )-> str:
"""simple docstring"""
lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto' )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=a , return_tensors='pt' )
lowercase__ = inputs.pixel_values.to(a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase__ = model(a )
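# --- Editor's sketch (addition, not part of the original test file): the fp16
# path the test above exercises, written as standalone code. `torch.floataa`
# in the record appears to stand for `torch.float16`. This assumes a CUDA
# device, the `accelerate` package, and network access for the checkpoint
# named in the test.
import torch
from transformers import ViTImageProcessor, ViTModel

def run_dino_fp16(image):
    # half-precision weights, placed automatically across available devices
    model = ViTModel.from_pretrained(
        "facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto"
    )
    processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8")
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    with torch.no_grad():  # inference only, no autograd graph needed
        outputs = model(pixel_values.to(model.device, torch.float16))
    return outputs.last_hidden_state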
| 45
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
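# --- Editor's sketch (addition): the lazy-import pattern the module above
# relies on, in miniature. Upstream, the final assignment is
# `sys.modules[__name__] = _LazyModule(...)`, which swaps the package for a
# lazy proxy. `MiniLazyModule` below is an assumed stand-in, not the real
# transformers `_LazyModule`; it only shows the core idea of resolving
# attributes to submodule imports on first access.
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module  # e.g. {"sqrt": "math"}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        # the costly import happens here, only when the attribute is first used
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)

demo = MiniLazyModule("demo", {"sqrt": "math"})
assert demo.sqrt(4.0) == 2.0  # `math` was imported lazily, on this line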
| 717
|
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[Any]:
stooge(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) - 1 )
return arr
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
if i >= h:
return
    # If the first element is larger than the last, swap them
if arr[i] > arr[h]:
lowercase__ , lowercase__ = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
        lowercase__ = int((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (h - t) )
# Recursively sort last 2/3 elements
stooge(_SCREAMING_SNAKE_CASE , i + t , (_SCREAMING_SNAKE_CASE) )
# Recursively sort first 2/3 elements
stooge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (h - t) )
if __name__ == "__main__":
lowercase_ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase_ = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
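# --- Editor's sketch (addition): a readable equivalent of the stooge sort
# above, with plain names. Stooge sort runs in O(n^(log 3 / log 1.5)),
# roughly O(n^2.71), so it is a teaching example rather than a practical
# sorting algorithm.
def stooge_sort_example(arr, i=0, h=None):
    if h is None:
        h = len(arr) - 1
    if i >= h:
        return arr
    if arr[i] > arr[h]:  # put the smaller endpoint first
        arr[i], arr[h] = arr[h], arr[i]
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        stooge_sort_example(arr, i, h - t)   # sort the first 2/3
        stooge_sort_example(arr, i + t, h)   # sort the last 2/3
        stooge_sort_example(arr, i, h - t)   # sort the first 2/3 again
    return arr

assert stooge_sort_example([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]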
| 45
| 0
|
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Any:
lowercase__ = AutoConfig.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
lowercase__ = AutoModelForSeqaSeqLM.from_config(__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
AutoTokenizer.from_pretrained(__lowerCAmelCase ).save_pretrained(__lowerCAmelCase )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
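# --- Editor's note (addition): with `fire`, the function above becomes a CLI.
# Assuming the script is saved as save_randomly_initialized.py, an invocation
# such as
#   python save_randomly_initialized.py t5-small ./t5-small-random
# saves a model with the t5-small architecture but freshly initialized weights
# (plus the matching tokenizer), loadable later via
# AutoModelForSeqaSeqLM (i.e. AutoModelForSeq2SeqLM).from_pretrained(...).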
| 718
|
from scipy.stats import spearmanr
import datasets
lowercase_ = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
lowercase_ = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: only returned if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
lowercase_ = R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE (datasets.Metric ):
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
def SCREAMING_SNAKE_CASE_ ( self : int , a : str , a : Any , a : str=False )-> Optional[int]:
"""simple docstring"""
lowercase__ = spearmanr(a , a )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 45
| 0
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class SCREAMING_SNAKE_CASE (__a ):
_UpperCamelCase : Any = ["image_processor", "tokenizer"]
_UpperCamelCase : Optional[Any] = "OwlViTImageProcessor"
_UpperCamelCase : Optional[Any] = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Union[str, Any] , a : Optional[int]=None , a : Dict=None , **a : List[str] )-> Optional[int]:
"""simple docstring"""
lowercase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowerCAmelCase_ , )
lowercase__ = kwargs.pop('feature_extractor' )
lowercase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
def __call__( self : Dict , a : int=None , a : Optional[int]=None , a : str=None , a : Optional[Any]="max_length" , a : Union[str, Any]="np" , **a : Any )-> int:
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
                'You have to specify at least one of text, query images or images. All three cannot be none.' )
if text is not None:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or (isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and not isinstance(text[0] , lowerCAmelCase_ )):
lowercase__ = [self.tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ )]
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and isinstance(text[0] , lowerCAmelCase_ ):
lowercase__ = []
# Maximum number of queries across batch
lowercase__ = max([len(lowerCAmelCase_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(lowerCAmelCase_ ) != max_num_queries:
lowercase__ = t + [' '] * (max_num_queries - len(lowerCAmelCase_ ))
lowercase__ = self.tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ )
encodings.append(lowerCAmelCase_ )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
lowercase__ = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
lowercase__ = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
lowercase__ = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
lowercase__ = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
lowercase__ = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
lowercase__ = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
lowercase__ = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
lowercase__ = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
lowercase__ = BatchEncoding()
lowercase__ = input_ids
lowercase__ = attention_mask
if query_images is not None:
lowercase__ = BatchEncoding()
lowercase__ = self.image_processor(
lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ).pixel_values
lowercase__ = query_pixel_values
if images is not None:
lowercase__ = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ )
if text is not None and images is not None:
lowercase__ = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
lowercase__ = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase_ ) , tensor_type=lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , *a : Optional[int] , **a : Any )-> int:
"""simple docstring"""
return self.image_processor.post_process(*lowerCAmelCase_ , **lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , *a : Any , **a : Dict )-> int:
"""simple docstring"""
return self.image_processor.post_process_object_detection(*lowerCAmelCase_ , **lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE_ ( self : Dict , *a : Any , **a : str )-> Dict:
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*lowerCAmelCase_ , **lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , *a : Union[str, Any] , **a : Optional[Any] )-> Optional[int]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , *a : str , **a : int )-> Tuple:
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Union[str, Any]:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowerCAmelCase_ , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowerCAmelCase_ , )
return self.image_processor
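# --- Editor's sketch (addition): a typical call into the processor above.
# The checkpoint name is an assumption (a public OWL-ViT checkpoint) and the
# image is a stand-in; requires network access. A batch of query lists is
# padded to the longest list, exactly as __call__ does above.
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.new("RGB", (768, 768))  # stand-in image
inputs = processor(
    text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt"
)
# `inputs` carries input_ids / attention_mask from the tokenizer and
# pixel_values from the image processor, merged into one BatchEncoding.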
| 719
|
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 50 ) -> int:
lowercase__ = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
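# --- Editor's sketch (addition): the code above appears to implement the
# Project Euler 114-style count of ways to fill a row with blocks of length
# at least 3, separated by at least one empty cell. A brute-force cross-check
# for small lengths: a row is valid when every maximal run of filled cells
# has length >= 3 (and solution(n) should agree with it for small n).
from itertools import product

def _brute_force_fill_count(length):
    count = 0
    for cells in product("01", repeat=length):
        runs = [run for run in "".join(cells).split("0") if run]
        if all(len(run) >= 3 for run in runs):
            count += 1
    return count

# for a row of length 7 there are exactly 17 valid fills
assert _brute_force_fill_count(7) == 17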
| 45
| 0
|
from collections import namedtuple
lowercase_ = namedtuple("""from_to""", """from_ to""")
lowercase_ = {
'cubicmeter': from_to(1, 1),
'litre': from_to(0.001, 1_000),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.00_454, 264.172),
'cubicyard': from_to(0.76_455, 1.30_795),
'cubicfoot': from_to(0.028, 35.3_147),
'cup': from_to(0.000_236_588, 4_226.75),
}
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float:
if from_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid \'from_type\' value: {from_type!r} Supported values are:\n"""
+ ', '.join(UpperCamelCase__ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid \'to_type\' value: {to_type!r}. Supported values are:\n"""
+ ', '.join(UpperCamelCase__ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
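# --- Editor's note (addition): conversions route through cubic metres, so the
# result is value * from_.from_ * to.to. Worked examples from the table above:
#   4 cubicmeter -> litre : 4 * 1 * 1000 = 4000 litres
#   1 litre -> gallon     : 1 * 0.001 * 264.172 ≈ 0.264 gallons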
| 720
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""b0""": efficientnet.EfficientNetBa,
"""b1""": efficientnet.EfficientNetBa,
"""b2""": efficientnet.EfficientNetBa,
"""b3""": efficientnet.EfficientNetBa,
"""b4""": efficientnet.EfficientNetBa,
"""b5""": efficientnet.EfficientNetBa,
"""b6""": efficientnet.EfficientNetBa,
"""b7""": efficientnet.EfficientNetBa,
}
lowercase_ = {
"""b0""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1_408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1_536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1_792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2_304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2_560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
lowercase__ = EfficientNetConfig()
lowercase__ = CONFIG_MAP[model_name]['hidden_dim']
lowercase__ = CONFIG_MAP[model_name]['width_coef']
lowercase__ = CONFIG_MAP[model_name]['depth_coef']
lowercase__ = CONFIG_MAP[model_name]['image_size']
lowercase__ = CONFIG_MAP[model_name]['dropout_rate']
lowercase__ = CONFIG_MAP[model_name]['dw_padding']
lowercase__ = 'huggingface/label-files'
lowercase__ = 'imagenet-1k-id2label.json'
lowercase__ = 1000
lowercase__ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase__ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
return config
def __UpperCamelCase () -> Tuple:
lowercase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase__ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
lowercase__ = CONFIG_MAP[model_name]['image_size']
lowercase__ = EfficientNetImageProcessor(
size={'height': size, 'width': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=_SCREAMING_SNAKE_CASE , )
return preprocessor
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Tuple:
lowercase__ = [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )]
lowercase__ = sorted(set(_SCREAMING_SNAKE_CASE ) )
lowercase__ = len(_SCREAMING_SNAKE_CASE )
lowercase__ = {b: str(_SCREAMING_SNAKE_CASE ) for b, i in zip(_SCREAMING_SNAKE_CASE , range(_SCREAMING_SNAKE_CASE ) )}
lowercase__ = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
lowercase__ = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
lowercase__ = {}
for item in rename_keys:
if item[0] in original_param_names:
lowercase__ = 'efficientnet.' + item[1]
lowercase__ = 'classifier.weight'
lowercase__ = 'classifier.bias'
return key_mapping
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
for key, value in tf_params.items():
if "normalization" in key:
continue
lowercase__ = key_mapping[key]
if "_conv" in key and "kernel" in key:
lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
lowercase__ = torch.from_numpy(np.transpose(_SCREAMING_SNAKE_CASE ) )
else:
lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
lowercase__ = model_classes[model_name](
include_top=_SCREAMING_SNAKE_CASE , weights='imagenet' , input_tensor=_SCREAMING_SNAKE_CASE , input_shape=_SCREAMING_SNAKE_CASE , pooling=_SCREAMING_SNAKE_CASE , classes=1000 , classifier_activation='softmax' , )
lowercase__ = original_model.trainable_variables
lowercase__ = original_model.non_trainable_variables
lowercase__ = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
lowercase__ = param.numpy()
lowercase__ = list(tf_params.keys() )
# Load HuggingFace model
lowercase__ = get_efficientnet_config(_SCREAMING_SNAKE_CASE )
lowercase__ = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
lowercase__ = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print('Converting parameters...' )
lowercase__ = rename_keys(_SCREAMING_SNAKE_CASE )
replace_params(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Initialize preprocessor and preprocess input image
lowercase__ = convert_image_processor(_SCREAMING_SNAKE_CASE )
lowercase__ = preprocessor(images=prepare_img() , return_tensors='pt' )
# HF model inference
hf_model.eval()
with torch.no_grad():
lowercase__ = hf_model(**_SCREAMING_SNAKE_CASE )
lowercase__ = outputs.logits.detach().numpy()
# Original model inference
lowercase__ = False
lowercase__ = CONFIG_MAP[model_name]['image_size']
lowercase__ = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
lowercase__ = image.img_to_array(_SCREAMING_SNAKE_CASE )
lowercase__ = np.expand_dims(_SCREAMING_SNAKE_CASE , axis=0 )
lowercase__ = original_model.predict(_SCREAMING_SNAKE_CASE )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ), "The predicted logits are not the same."
print('Model outputs match!' )
if save_model:
# Create folder to save model
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
os.mkdir(_SCREAMING_SNAKE_CASE )
# Save converted model and image processor
hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )
preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
lowercase__ = F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE )
hf_model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
lowercase_ = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
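# --- Editor's note (addition): example invocation, assuming the script is
# saved as convert_efficientnet_to_pytorch.py:
#   python convert_efficientnet_to_pytorch.py --model_name b0 --save_model
# This loads the Keras EfficientNet-B0 ImageNet weights, copies them into the
# HF model via the key mapping built above, asserts that both models' logits
# agree to atol=1e-3, and then writes the converted model and image processor
# to --pytorch_dump_folder_path.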
| 45
| 0
|
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] , a : List[Any] , a : List[str]=13 , a : Union[str, Any]=30 , a : Dict=2 , a : Optional[int]=3 , a : Any=True , a : Any=True , a : Tuple=32 , a : Optional[Any]=2 , a : Tuple=4 , a : Optional[int]=37 , a : Optional[int]="gelu" , a : int=0.1 , a : List[Any]=0.1 , a : Any=10 , a : int=0.02 , a : Dict=3 , a : Optional[Any]=0.6 , a : Optional[int]=None , )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = mask_ratio
lowercase__ = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded up
# (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def SCREAMING_SNAKE_CASE_ ( self : str )-> Tuple:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Tuple:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : List[str] , a : Optional[Any] , a : Dict )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFViTMAEModel(config=UpperCAmelCase__ )
lowercase__ = model(UpperCAmelCase__ , training=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : List[str] , a : Union[str, Any] , a : Tuple )-> Tuple:
"""simple docstring"""
lowercase__ = TFViTMAEForPreTraining(UpperCAmelCase__ )
lowercase__ = model(UpperCAmelCase__ , training=UpperCAmelCase__ )
# expected sequence length = num_patches
lowercase__ = (self.image_size // self.patch_size) ** 2
lowercase__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowercase__ = 1
lowercase__ = TFViTMAEForPreTraining(UpperCAmelCase__ )
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(UpperCAmelCase__ , training=UpperCAmelCase__ )
lowercase__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def SCREAMING_SNAKE_CASE_ ( self : int )-> Tuple:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs
lowercase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
_UpperCamelCase : Optional[Any] = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : Any = False
_UpperCamelCase : List[str] = False
_UpperCamelCase : Dict = False
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> int:
"""simple docstring"""
lowercase__ = TFViTMAEModelTester(self )
lowercase__ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE_ ( self : str )-> Dict:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase__ , tf.keras.layers.Layer ) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> str:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCAmelCase__ )
lowercase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : str )-> List[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCAmelCase__ )
lowercase__ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase__ = model(UpperCAmelCase__ , noise=UpperCAmelCase__ )
lowercase__ = copy.deepcopy(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
lowercase__ = model(**UpperCAmelCase__ , noise=UpperCAmelCase__ )
lowercase__ = outputs_dict[0].numpy()
lowercase__ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[int]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(a : Optional[Any] ):
lowercase__ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(UpperCAmelCase__ ):
lowercase__ = v.numpy()
else:
lowercase__ = np.array(UpperCAmelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCAmelCase__ )
lowercase__ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase__ = prepare_numpy_arrays(UpperCAmelCase__ )
lowercase__ = model(UpperCAmelCase__ , noise=UpperCAmelCase__ )
lowercase__ = model(**UpperCAmelCase__ , noise=UpperCAmelCase__ )
self.assert_outputs_same(UpperCAmelCase__ , UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : int , a : Any , a : Any , a : Optional[int] )-> Tuple:
"""simple docstring"""
np.random.seed(2 )
lowercase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = tf.constant(UpperCAmelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ = tf_noise
super().check_pt_tf_models(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(UpperCAmelCase__ )
if module_member_name.endswith('MainLayer' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
for module_member in (getattr(UpperCAmelCase__ , UpperCAmelCase__ ),)
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(UpperCAmelCase__ , '_keras_serializable' , UpperCAmelCase__ )
}
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = tf.convert_to_tensor(UpperCAmelCase__ )
inputs_dict.update({'noise': noise} )
for main_layer_class in tf_main_layer_classes:
lowercase__ = main_layer_class(UpperCAmelCase__ )
lowercase__ = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowercase__ = tf.keras.Model(UpperCAmelCase__ , outputs=main_layer(UpperCAmelCase__ ) )
lowercase__ = model(UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = os.path.join(UpperCAmelCase__ , 'keras_model.h5' )
model.save(UpperCAmelCase__ )
lowercase__ = tf.keras.models.load_model(
UpperCAmelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(UpperCAmelCase__ , tf.keras.Model )
lowercase__ = model(UpperCAmelCase__ )
self.assert_outputs_same(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[str]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCAmelCase__ )
lowercase__ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase__ = model(UpperCAmelCase__ , noise=UpperCAmelCase__ )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ = outputs.last_hidden_state.numpy()
lowercase__ = 0
else:
lowercase__ = outputs.logits.numpy()
lowercase__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase__ , saved_model=UpperCAmelCase__ )
lowercase__ = model_class.from_pretrained(UpperCAmelCase__ )
lowercase__ = model(UpperCAmelCase__ , noise=UpperCAmelCase__ )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ = after_outputs['last_hidden_state'].numpy()
lowercase__ = 0
else:
lowercase__ = after_outputs['logits'].numpy()
lowercase__ = 0
lowercase__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCAmelCase__ , 1E-5 )
def SCREAMING_SNAKE_CASE_ ( self : str )-> List[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCAmelCase__ )
lowercase__ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase__ = model(UpperCAmelCase__ , noise=UpperCAmelCase__ )
lowercase__ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(UpperCAmelCase__ )
lowercase__ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowercase__ = model_class.from_config(model.config )
lowercase__ = new_model(UpperCAmelCase__ ) # Build model
new_model.set_weights(model.get_weights() )
lowercase__ = new_model(UpperCAmelCase__ , noise=UpperCAmelCase__ )
self.assert_outputs_same(UpperCAmelCase__ , UpperCAmelCase__ )
@unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.' )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Optional[int]:
"""simple docstring"""
pass
    @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.' )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[str]:
"""simple docstring"""
pass
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[Any]:
"""simple docstring"""
lowercase__ = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(UpperCAmelCase__ )
def __UpperCamelCase () -> Optional[Any]:
lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Union[str, Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCAmelCase__ , return_tensors='tf' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase__ = ViTMAEConfig()
lowercase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(1, num_patches) )
# forward pass
lowercase__ = model(**UpperCAmelCase__ , noise=UpperCAmelCase__ )
# verify the logits
lowercase__ = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
lowercase__ = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCAmelCase__ , atol=1E-4 )
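# --- Editor's note (addition): why the tests above keep seeding NumPy and
# passing `noise` explicitly. ViTMAE draws a fresh random mask on every
# forward pass, so two calls on the same image generally disagree; pinning the
# noise that drives the mask (shape (batch, num_patches), values in [0, 1))
# makes the outputs reproducible, e.g.:
#   noise = np.random.uniform(size=(1, (224 // 16) ** 2))   # 196 patches
#   out_a = model(pixel_values, noise=noise)
#   out_b = model(pixel_values, noise=noise)  # same mask -> same logits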
| 721
|
import argparse
import json
import subprocess
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
lowercase__ = []
lowercase__ = (
F"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
' https://api.github.com/repos/huggingface/transformers/actions/runners'
)
lowercase__ = subprocess.run(_SCREAMING_SNAKE_CASE , shell=_SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE )
lowercase__ = output.stdout.decode('utf-8' )
lowercase__ = json.loads(_SCREAMING_SNAKE_CASE )
lowercase__ = status['runners']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_SCREAMING_SNAKE_CASE )
    # save the results so we can report them on Slack
with open('offline_runners.txt' , 'w' ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) )
if len(_SCREAMING_SNAKE_CASE ) > 0:
lowercase__ = '\n'.join([x['name'] for x in offline_runners] )
raise ValueError(F"""The following runners are offline:\n{failed}""" )
if __name__ == "__main__":
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
return values.split(',' )
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
lowercase_ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
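# --- Editor's note (addition): example invocation (script name and token are
# illustrative):
#   python get_runner_status.py --target_runners runner-a,runner-b --token $GH_TOKEN
# When any named self-hosted runner reports status "offline", the script
# writes offline_runners.txt (for the Slack report) and raises the ValueError
# above, failing the CI job.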
| 45
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class SCREAMING_SNAKE_CASE (__a , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = KandinskyVaaControlnetImgaImgPipeline
_UpperCamelCase : Any = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
_UpperCamelCase : Any = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
_UpperCamelCase : Union[str, Any] = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_UpperCamelCase : Any = False
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Any:
"""simple docstring"""
return 32
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[int]:
"""simple docstring"""
return 32
@property
def SCREAMING_SNAKE_CASE_ ( self : str )-> Union[str, Any]:
"""simple docstring"""
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE_ ( self : int )-> List[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE_ ( self : str )-> List[str]:
"""simple docstring"""
return 100
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> int:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = {
'in_channels': 8,
            # Out channels is double the in channels because the model predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowercase__ = UNetaDConditionModel(**A__ )
return model
@property
def SCREAMING_SNAKE_CASE_ ( self : str )-> Union[str, Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = VQModel(**self.dummy_movq_kwargs )
return model
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any:
"""simple docstring"""
lowercase__ = self.dummy_unet
lowercase__ = self.dummy_movq
lowercase__ = {
'num_train_timesteps': 1_000,
'beta_schedule': 'linear',
'beta_start': 0.00085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowercase__ = DDIMScheduler(**A__ )
lowercase__ = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : int , a : Any=0 )-> Tuple:
"""simple docstring"""
lowercase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A__ ) ).to(A__ )
lowercase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
A__ )
# create init_image
lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(A__ ) ).to(A__ )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ = Image.fromarray(np.uinta(A__ ) ).convert('RGB' ).resize((256, 256) )
# create hint
lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(A__ ) ).to(A__ )
if str(A__ ).startswith('mps' ):
lowercase__ = torch.manual_seed(A__ )
else:
lowercase__ = torch.Generator(device=A__ ).manual_seed(A__ )
lowercase__ = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> int:
"""simple docstring"""
lowercase__ = 'cpu'
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**A__ )
lowercase__ = pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
lowercase__ = pipe(**self.get_dummy_inputs(A__ ) )
lowercase__ = output.images
lowercase__ = pipe(
**self.get_dummy_inputs(A__ ) , return_dict=A__ , )[0]
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ = np.array(
[0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : int )-> Optional[Any]:
"""simple docstring"""
lowercase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
lowercase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowercase__ = init_image.resize((512, 512) )
lowercase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
lowercase__ = torch.from_numpy(np.array(A__ ) ).float() / 255.0
lowercase__ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
lowercase__ = 'A robot, 4k photo'
lowercase__ = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(A__ )
lowercase__ = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa )
lowercase__ = pipeline.to(A__ )
pipeline.set_progress_bar_config(disable=A__ )
lowercase__ = torch.Generator(device='cpu' ).manual_seed(0 )
lowercase__ , lowercase__ = pipe_prior(
A__ , image=A__ , strength=0.85 , generator=A__ , negative_prompt='' , ).to_tuple()
lowercase__ = pipeline(
image=A__ , image_embeds=A__ , negative_image_embeds=A__ , hint=A__ , generator=A__ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='np' , )
lowercase__ = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(A__ , A__ )
| 700
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Tuple = 'ClapFeatureExtractor'
_UpperCamelCase : Union[str, Any] = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self : List[Any] , a : int , a : str )-> Any:
"""simple docstring"""
super().__init__(a , a )
def __call__( self : Any , a : Tuple=None , a : Optional[int]=None , a : int=None , **a : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = kwargs.pop('sampling_rate' , a )
if text is None and audios is None:
        raise ValueError('You have to specify either text or audios; they cannot both be none.' )
if text is not None:
lowercase__ = self.tokenizer(a , return_tensors=a , **a )
if audios is not None:
lowercase__ = self.feature_extractor(
a , sampling_rate=a , return_tensors=a , **a )
if text is not None and audios is not None:
lowercase__ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
def SCREAMING_SNAKE_CASE_ ( self : str , *a : Dict , **a : int )-> Optional[int]:
"""simple docstring"""
return self.tokenizer.batch_decode(*a , **a )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , *a : int , **a : Dict )-> Dict:
"""simple docstring"""
return self.tokenizer.decode(*a , **a )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.tokenizer.model_input_names
lowercase__ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
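# --- Editor's sketch (addition): a typical call into the processor above.
# The checkpoint name is an assumption (a public CLAP checkpoint) and the
# waveform is a stand-in; requires network access.
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
waveform = np.zeros(48_000, dtype=np.float32)  # one second of silence, 48 kHz
inputs = processor(
    text=["a dog barking"], audios=waveform, sampling_rate=48_000, return_tensors="pt"
)
# `inputs` merges the tokenizer fields with `input_features` from the feature
# extractor, exactly as __call__ does above.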
| 45
| 0
|
from __future__ import annotations
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
if len(lowercase__ ) < k or k < 0:
raise ValueError('Invalid Input' )
lowercase__ = lowercase__ = sum(array[:k] )
for i in range(len(lowercase__ ) - k ):
lowercase__ = current_sum - array[i] + array[i + k]
lowercase__ = max(lowercase__ , lowercase__ )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
lowercase_ = [randint(-1_000, 1_000) for i in range(100)]
lowercase_ = randint(0, 110)
print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
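# --- Editor's sketch (addition): the same sliding-window idea as the function
# above (invoked as max_sum_in_array), with plain names.
def max_sum_k_window(array, k):
    if k < 0 or len(array) < k:
        raise ValueError("array must hold at least k elements and k must be >= 0")
    window = sum(array[:k])  # sum of the first window
    best = window
    for i in range(len(array) - k):
        window += array[i + k] - array[i]  # slide the window one step right
        best = max(best, window)
    return best

assert max_sum_k_window([1, 4, 2, 10, 2, 3, 1, 0, 20], 4) == 24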
| 701
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
lowercase_ = None
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
lowercase_ = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
lowercase_ = {
"""moussaKam/mbarthez""": 1_024,
"""moussaKam/barthez""": 1_024,
"""moussaKam/barthez-orangesum-title""": 1_024,
}
lowercase_ = """▁"""
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Dict = VOCAB_FILES_NAMES
_UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[Any] = ['input_ids', 'attention_mask']
_UpperCamelCase : int = BarthezTokenizer
def __init__( self : List[Any] , a : Union[str, Any]=None , a : Optional[Any]=None , a : Dict="<s>" , a : Union[str, Any]="</s>" , a : List[str]="</s>" , a : Optional[Any]="<s>" , a : int="<unk>" , a : str="<pad>" , a : Optional[int]="<mask>" , **a : Union[str, Any] , )-> Tuple:
"""simple docstring"""
lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
a , tokenizer_file=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , **a , )
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : str , a : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
copyfile(self.vocab_file , a )
return (out_vocab_file,)
| 45
| 0
|
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
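# Example: with the sets defined under __main__, the intersection is
# {'c', 'd', 'e'} (3 elements) and the union has 8 elements, so the script
# prints 3 / 8 = 0.375.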
if __name__ == "__main__":
    set_a = {'a', 'b', 'c', 'd', 'e'}
    set_b = {'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b))
| 702
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : List[Any] = StableDiffusionSAGPipeline
_UpperCamelCase : str = TEXT_TO_IMAGE_PARAMS
_UpperCamelCase : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
lowercase__ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
lowercase__ = CLIPTextModel(a )
lowercase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowercase__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[int] , a : Any=0 )-> Union[str, Any]:
"""simple docstring"""
if str(a ).startswith('mps' ):
lowercase__ = torch.manual_seed(a )
else:
lowercase__ = torch.Generator(device=a ).manual_seed(a )
lowercase__ = {
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : str )-> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
lowercase__ = sag_pipe.to(a )
sag_pipe.set_progress_bar_config(disable=a )
lowercase__ = '.'
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowercase__ = sag_pipe.to(a )
sag_pipe.set_progress_bar_config(disable=a )
lowercase__ = '.'
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowercase__ = sag_pipe.to(a )
sag_pipe.set_progress_bar_config(disable=a )
lowercase__ = '.'
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , width=768 , height=512 , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
lowercase__ = output.images
assert image.shape == (1, 512, 768, 3)
| 45
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
_UpperCamelCase : Optional[Any] = ViTImageProcessor if is_vision_available() else None
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Optional[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
"""simple docstring"""
lowercase__ = (3, 32, 128)
lowercase__ = tempfile.mkdtemp()
# fmt: off
lowercase__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
lowercase__ = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowercase__ ) + '\n' )
lowercase__ = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
lowercase__ = os.path.join(self.tmpdirname , lowercase__ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(lowercase__ , lowercase__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , **a : List[Any] )-> int:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , **a : Optional[int] )-> Optional[int]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowercase__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> str:
"""simple docstring"""
lowercase__ = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
lowercase__ = Image.fromarray(np.moveaxis(lowercase__ , 0 , -1 ) )
return image_input
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
processor.save_pretrained(self.tmpdirname )
lowercase__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase__ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , lowercase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase__ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowercase__ = self.get_image_processor(do_normalize=lowercase__ , padding_value=1.0 )
lowercase__ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowercase__ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , lowercase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase__ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(lowercase__ , return_tensors='np' )
lowercase__ = processor(images=lowercase__ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
lowercase__ = """test"""
lowercase__ = processor(text=lowercase__ )
lowercase__ = tokenizer(lowercase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
lowercase__ = """test"""
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowercase__ , images=lowercase__ )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'labels'] )
# test if it raises when no input is passed
with pytest.raises(lowercase__ ):
processor()
def SCREAMING_SNAKE_CASE_ ( self : str )-> List[Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.char_decode(lowercase__ )
lowercase__ = tokenizer.batch_decode(lowercase__ )
lowercase__ = [seq.replace(' ' , '' ) for seq in decoded_tok]
self.assertListEqual(lowercase__ , lowercase__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Any:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
lowercase__ = None
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowercase__ , images=lowercase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
lowercase__ = torch.randn(1 , 27 , 38 )
lowercase__ = torch.randn(1 , 27 , 50_257 )
lowercase__ = torch.randn(1 , 27 , 30_522 )
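        # The three tensors stand in for character, BPE and WordPiece logits:
        # 38 is the character vocab size, 50_257 the GPT-2 BPE vocab size and
        # 30_522 the BERT WordPiece vocab size, over 27 decoding positions.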
lowercase__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'] )
| 703
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""facebook/deit-base-distilled-patch16-224""": (
"""https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"""
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Any = 'deit'
def __init__( self : Any , a : Union[str, Any]=768 , a : Optional[Any]=12 , a : Union[str, Any]=12 , a : Optional[int]=3_072 , a : Optional[int]="gelu" , a : Optional[Any]=0.0 , a : List[Any]=0.0 , a : int=0.02 , a : List[str]=1E-1_2 , a : Optional[int]=224 , a : Tuple=16 , a : List[Any]=3 , a : List[str]=True , a : Any=16 , **a : Union[str, Any] , )-> int:
"""simple docstring"""
super().__init__(**a )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = qkv_bias
lowercase__ = encoder_stride
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : List[Any] = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE_ ( self : int )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
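    # Marking batch, channel, height and width as dynamic axes lets an
    # exported ONNX graph accept `pixel_values` inputs of varying size.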
@property
def SCREAMING_SNAKE_CASE_ ( self : Any )-> float:
"""simple docstring"""
return 1E-4
| 45
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""NllbTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""NllbTokenizerFast"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 704
|
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 ) -> List[Any]:
lowercase__ = None
if token is not None:
lowercase__ = {'Accept': 'application/vnd.github+json', 'Authorization': F"""Bearer {token}"""}
# The id of a workflow (not of a workflow run)
lowercase__ = '636036'
lowercase__ = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
lowercase__ = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
return result["workflow_runs"]
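# Only finished runs have a complete set of artifacts, so the helper below
# returns the id of the most recent daily CI run whose status is "completed".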
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
lowercase__ = get_daily_ci_runs(_SCREAMING_SNAKE_CASE )
lowercase__ = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
lowercase__ = workflow_run['id']
break
return workflow_run_id
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
lowercase__ = get_last_daily_ci_runs(_SCREAMING_SNAKE_CASE )
if workflow_run_id is not None:
lowercase__ = get_artifacts_links(worflow_run_id=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
lowercase__ = artifacts_links[artifact_name]
download_artifact(
artifact_name=_SCREAMING_SNAKE_CASE , artifact_url=_SCREAMING_SNAKE_CASE , output_dir=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
get_last_daily_ci_artifacts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase__ = {}
for artifact_name in artifact_names:
lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , F"""{artifact_name}.zip""" )
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
lowercase__ = {}
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z:
for filename in z.namelist():
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
# read the file
with z.open(_SCREAMING_SNAKE_CASE ) as f:
lowercase__ = f.read().decode('UTF-8' )
return results
| 45
| 0
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
def __init__( self : Dict )-> Dict:
"""simple docstring"""
self.test()
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> str:
"""simple docstring"""
lowercase__ = 0
lowercase__ = False
while not completed:
if counter == 1:
self.reset()
lowercase__ = self.advance()
if not self.does_advance(a ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
lowercase__ , lowercase__ , lowercase__ = self.update(a )
counter += 1
if counter > 10_000:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def SCREAMING_SNAKE_CASE_ ( self : str )-> Any:
"""simple docstring"""
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : int )-> int:
"""simple docstring"""
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : int )-> Tuple:
"""simple docstring"""
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Any:
"""simple docstring"""
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
"""simple docstring"""
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def SCREAMING_SNAKE_CASE_ ( self : int , a : Tuple=False )-> Optional[int]:
"""simple docstring"""
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
def __init__( self : Optional[int] , a : List[int] )-> List[str]:
"""simple docstring"""
super(a , self ).__init__()
if not isinstance(a , a ) or len(a ) == 0:
raise ValueError(f"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
if any((not isinstance(a , a ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
lowercase__ = token_ids
lowercase__ = len(self.token_ids )
lowercase__ = -1 # the index of the currently fulfilled step
lowercase__ = False
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
"""simple docstring"""
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : int )-> List[str]:
"""simple docstring"""
if not isinstance(a , a ):
raise ValueError(f"""`token_id` has to be an `int`, but is {token_id} of type {type(a )}""" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : int )-> Optional[int]:
"""simple docstring"""
if not isinstance(a , a ):
raise ValueError(f"""`token_id` has to be an `int`, but is {token_id} of type {type(a )}""" )
lowercase__ = False
lowercase__ = False
lowercase__ = False
if self.does_advance(a ):
self.fulfilled_idx += 1
lowercase__ = True
if self.fulfilled_idx == (self.seqlen - 1):
lowercase__ = True
lowercase__ = completed
else:
# failed to make progress.
lowercase__ = True
self.reset()
return stepped, completed, reset
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = False
lowercase__ = 0
def SCREAMING_SNAKE_CASE_ ( self : int )-> List[str]:
"""simple docstring"""
return self.seqlen - (self.fulfilled_idx + 1)
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : str=False )-> int:
"""simple docstring"""
lowercase__ = PhrasalConstraint(self.token_ids )
if stateful:
lowercase__ = self.seqlen
lowercase__ = self.fulfilled_idx
lowercase__ = self.completed
return new_constraint
class SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] , a : List[List[int]] , a : Dict=True )-> Optional[int]:
"""simple docstring"""
lowercase__ = max([len(a ) for one in nested_token_ids] )
lowercase__ = {}
for token_ids in nested_token_ids:
lowercase__ = root
for tidx, token_id in enumerate(a ):
if token_id not in level:
lowercase__ = {}
lowercase__ = level[token_id]
if no_subsets and self.has_subsets(a , a ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
f""" {nested_token_ids}.""" )
lowercase__ = root
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : Optional[int] )-> List[Any]:
"""simple docstring"""
lowercase__ = self.trie
for current_token in current_seq:
lowercase__ = start[current_token]
lowercase__ = list(start.keys() )
return next_tokens
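    # An empty result means `current_seq` has walked to a leaf of the trie,
    # i.e. one complete phrase from `nested_token_ids` has been fully matched.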
def SCREAMING_SNAKE_CASE_ ( self : int , a : Dict )-> Tuple:
"""simple docstring"""
lowercase__ = self.next_tokens(a )
return len(a ) == 0
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : Any )-> Optional[int]:
"""simple docstring"""
lowercase__ = list(root.values() )
if len(a ) == 0:
return 1
else:
return sum([self.count_leaves(a ) for nn in next_nodes] )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str , a : int )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.count_leaves(a )
return len(a ) != leaf_count
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
def __init__( self : List[Any] , a : List[List[int]] )-> int:
"""simple docstring"""
super(a , self ).__init__()
if not isinstance(a , a ) or len(a ) == 0:
raise ValueError(f"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
if any(not isinstance(a , a ) for token_ids in nested_token_ids ):
raise ValueError(f"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
if any(
any((not isinstance(a , a ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
lowercase__ = DisjunctiveTrie(a )
lowercase__ = nested_token_ids
lowercase__ = self.trie.max_height
lowercase__ = []
lowercase__ = False
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.trie.next_tokens(self.current_seq )
if len(a ) == 0:
return None
else:
return token_list
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : int )-> int:
"""simple docstring"""
if not isinstance(a , a ):
raise ValueError(f"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(a )}""" )
lowercase__ = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : int )-> Tuple:
"""simple docstring"""
if not isinstance(a , a ):
raise ValueError(f"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(a )}""" )
lowercase__ = False
lowercase__ = False
lowercase__ = False
if self.does_advance(a ):
self.current_seq.append(a )
lowercase__ = True
else:
lowercase__ = True
self.reset()
lowercase__ = self.trie.reached_leaf(self.current_seq )
lowercase__ = completed
return stepped, completed, reset
def SCREAMING_SNAKE_CASE_ ( self : str )-> List[Any]:
"""simple docstring"""
lowercase__ = False
lowercase__ = []
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> str:
"""simple docstring"""
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def SCREAMING_SNAKE_CASE_ ( self : int , a : Any=False )-> Optional[Any]:
"""simple docstring"""
lowercase__ = DisjunctiveConstraint(self.token_ids )
if stateful:
lowercase__ = self.seqlen
lowercase__ = self.current_seq
lowercase__ = self.completed
return new_constraint
class SCREAMING_SNAKE_CASE :
def __init__( self : Tuple , a : List[Constraint] )-> int:
"""simple docstring"""
lowercase__ = constraints
# max # of steps required to fulfill a given constraint
lowercase__ = max([c.seqlen for c in constraints] )
lowercase__ = len(a )
lowercase__ = False
self.init_state()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
"""simple docstring"""
lowercase__ = []
lowercase__ = None
lowercase__ = [constraint.copy(stateful=a ) for constraint in self.constraints]
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
"""simple docstring"""
lowercase__ = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
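    # This score summarizes constraint progress: each completed constraint is
    # worth `max_seqlen` points, plus partial credit for steps already
    # fulfilled on the in-progress constraint; constrained beam search uses it
    # to group hypotheses by how close they are to satisfying everything.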
def SCREAMING_SNAKE_CASE_ ( self : Any )-> List[Any]:
"""simple docstring"""
lowercase__ = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
lowercase__ = constraint.advance()
if isinstance(a , a ):
token_list.append(a )
elif isinstance(a , a ):
token_list.extend(a )
else:
lowercase__ = self.inprogress_constraint.advance()
if isinstance(a , a ):
token_list.append(a )
elif isinstance(a , a ):
token_list.extend(a )
if len(a ) == 0:
return None
else:
return token_list
def SCREAMING_SNAKE_CASE_ ( self : str , a : Optional[List[int]] )-> str:
"""simple docstring"""
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
lowercase__ , lowercase__ = self.add(a )
# the entire list of constraints are fulfilled
if self.completed:
break
def SCREAMING_SNAKE_CASE_ ( self : str , a : int )-> Dict:
"""simple docstring"""
if not isinstance(a , a ):
raise ValueError(f"""`token_id` should be an `int`, but is `{token_id}`.""" )
lowercase__ , lowercase__ = False, False
if self.completed:
lowercase__ = True
lowercase__ = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
lowercase__ , lowercase__ , lowercase__ = self.inprogress_constraint.update(a )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=a ) )
lowercase__ = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
lowercase__ = None
if len(self.pending_constraints ) == 0:
# we're done!
lowercase__ = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(a ):
lowercase__ , lowercase__ , lowercase__ = pending_constraint.update(a )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(a )
lowercase__ = None
if not complete and stepped:
lowercase__ = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
lowercase__ = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
lowercase__ = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def SCREAMING_SNAKE_CASE_ ( self : str , a : str=True )-> str:
"""simple docstring"""
lowercase__ = ConstraintListState(self.constraints ) # we actually never though self.constraints objects
# throughout this process. So it's at initialization state.
if stateful:
lowercase__ = [
constraint.copy(stateful=a ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
lowercase__ = self.inprogress_constraint.copy(stateful=a )
lowercase__ = [constraint.copy() for constraint in self.pending_constraints]
return new_state
| 705
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowercase_ = False
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe.dual_guided(
prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a )
lowercase__ = VersatileDiffusionPipeline.from_pretrained(a , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = generator.manual_seed(0 )
lowercase__ = pipe.dual_guided(
prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[str]:
"""simple docstring"""
lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = 'cyberpunk 2077'
lowercase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe.dual_guided(
prompt=a , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowercase__ = 'A painting of a squirrel eating a burger '
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe.text_to_image(
prompt=a , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowercase__ = pipe.image_variation(a , generator=a , output_type='numpy' ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 45
| 0
|
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="attention" ) -> str:
lowercase__ = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
lowercase__ = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
lowercase__ = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
lowercase__ = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
lowercase__ = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
lowercase__ = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
lowercase__ = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
lowercase__ = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
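# T5X stores attention kernels with an explicit head axis; the reshapes above
# flatten (d_model, n_heads, d_head) tensors into the 2-D weight matrices that
# the PyTorch attention modules expect.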
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Dict:
if split_mlp_wi:
lowercase__ = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
lowercase__ = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
lowercase__ = (wi_a, wi_a)
else:
lowercase__ = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
lowercase__ = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
return wi, wo
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
return params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , *, _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) -> List[Any]:
lowercase__ = traverse_util.flatten_dict(variables['target'] )
    lowercase__ = {"/".join(k): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowercase__ = "encoder/encoder/mlp/wi_0/kernel" in old
print('Split MLP:' , lowercase_ )
lowercase__ = collections.OrderedDict()
# Shared embeddings.
lowercase__ = old["token_embedder/embedding"]
# Encoder.
for i in range(lowercase_ ):
# Block i, layer 0 (Self Attention).
lowercase__ = tax_layer_norm_lookup(lowercase_ , lowercase_ , 'encoder' , 'pre_attention_layer_norm' )
lowercase__ = tax_attention_lookup(lowercase_ , lowercase_ , 'encoder' , 'attention' )
lowercase__ = layer_norm
lowercase__ = k.T
lowercase__ = o.T
lowercase__ = q.T
lowercase__ = v.T
# Block i, layer 1 (MLP).
lowercase__ = tax_layer_norm_lookup(lowercase_ , lowercase_ , 'encoder' , 'pre_mlp_layer_norm' )
lowercase__ = tax_mlp_lookup(lowercase_ , lowercase_ , 'encoder' , lowercase_ )
lowercase__ = layer_norm
if split_mlp_wi:
lowercase__ = wi[0].T
lowercase__ = wi[1].T
else:
lowercase__ = wi.T
lowercase__ = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase__ = tax_relpos_bias_lookup(
lowercase_ , lowercase_ , 'encoder' ).T
lowercase__ = old["encoder/encoder_norm/scale"]
if not scalable_attention:
lowercase__ = tax_relpos_bias_lookup(
lowercase_ , 0 , 'encoder' ).T
lowercase__ = tax_relpos_bias_lookup(
lowercase_ , 0 , 'decoder' ).T
if not is_encoder_only:
# Decoder.
for i in range(lowercase_ ):
# Block i, layer 0 (Self Attention).
lowercase__ = tax_layer_norm_lookup(lowercase_ , lowercase_ , 'decoder' , 'pre_self_attention_layer_norm' )
lowercase__ = tax_attention_lookup(lowercase_ , lowercase_ , 'decoder' , 'self_attention' )
lowercase__ = layer_norm
lowercase__ = k.T
lowercase__ = o.T
lowercase__ = q.T
lowercase__ = v.T
# Block i, layer 1 (Cross Attention).
lowercase__ = tax_layer_norm_lookup(lowercase_ , lowercase_ , 'decoder' , 'pre_cross_attention_layer_norm' )
lowercase__ = tax_attention_lookup(lowercase_ , lowercase_ , 'decoder' , 'encoder_decoder_attention' )
lowercase__ = layer_norm
lowercase__ = k.T
lowercase__ = o.T
lowercase__ = q.T
lowercase__ = v.T
# Block i, layer 2 (MLP).
lowercase__ = tax_layer_norm_lookup(lowercase_ , lowercase_ , 'decoder' , 'pre_mlp_layer_norm' )
lowercase__ = tax_mlp_lookup(lowercase_ , lowercase_ , 'decoder' , lowercase_ )
lowercase__ = layer_norm
if split_mlp_wi:
lowercase__ = wi[0].T
lowercase__ = wi[1].T
else:
lowercase__ = wi.T
lowercase__ = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase__ = tax_relpos_bias_lookup(lowercase_ , lowercase_ , 'decoder' ).T
lowercase__ = old["decoder/decoder_norm/scale"]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowercase__ = old["decoder/logits_dense/kernel"].T
return new
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
lowercase__ = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowercase__ = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowercase__ = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('Using shared word embeddings as lm_head.' )
lowercase__ = state_dict["shared.weight"]
return state_dict
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
lowercase__ = checkpoints.load_tax_checkpoint(lowercase_ )
lowercase__ = convert_tax_to_pytorch(
lowercase_ , num_layers=config.num_layers , is_encoder_only=lowercase_ , scalable_attention=lowercase_ )
lowercase__ = make_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ , strict=lowercase_ )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , ) -> Tuple:
lowercase__ = MTaConfig.from_json_file(lowercase_ )
print(F"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowercase__ = UMTaEncoderModel(lowercase_ )
else:
lowercase__ = UMTaForConditionalGeneration(lowercase_ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(lowercase_ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowercase_ )
print('Done' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
lowercase_ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
    args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 706
|
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
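# Example: [2, 4, 6] is an arithmetic series (constant difference 2) with
# arithmetic mean (2 + 4 + 6) / 3 = 4; [2, 4, 7] is not arithmetic because
# 7 - 4 != 4 - 2.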
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45
| 0
|
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    """A weighted, undirected graph for Prim's minimum-spanning-tree algorithm."""
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }
    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight
    def prims_algorithm(self) -> Graph:
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
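# prims_algorithm grows the minimum spanning tree one vertex at a time: each
# pass scans every edge with exactly one endpoint inside the partial tree (the
# XOR test) and commits the cheapest such edge, a simple O(V * E) Prim variant.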
def solution(filename: str = "p107_network.txt") -> int:
    script_directory = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_directory, filename)
    edges: dict[EdgeT, int] = {}
    with open(network_file) as f:
        data = f.read().strip().split('\n')
        adjacency_matrix = [line.split(',') for line in data]
    for edge_b in range(1, len(adjacency_matrix)):
        for edge_a in range(edge_b):
            if adjacency_matrix[edge_b][edge_a] != "-":
                edges[(edge_a, edge_b)] = int(adjacency_matrix[edge_b][edge_a])
    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
| 707
|
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates the curve as a sequence of straight chords and sums their lengths
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
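# The arc-length integral of sqrt(1 + f'(x)^2) dx is approximated by the total
# length of the chords between successive sample points, so the estimate
# approaches the true length for smooth curves as `steps` grows.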
if __name__ == "__main__":
    def f(x: float) -> float:
        return math.sin(10 * x)
print("""f(x) = sin(10 * x)""")
print("""The length of the curve from x = -10 to x = 10 is:""")
    i = 10
while i <= 100_000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
| 45
| 0
|
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a: list) -> None:
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print('Sorted order is:', ' '.join(str(x) for x in a))
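# Example: the list above becomes [2, 3, 4, 6, 7, 8, 8]; pigeonhole sort runs
# in O(n + range) time but needs O(range) extra space, so it pays off only
# when max(a) - min(a) is small.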
if __name__ == "__main__":
main()
| 708
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 45
| 0
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE :
@staticmethod
def SCREAMING_SNAKE_CASE_ ( *a : List[Any] , **a : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
pass
def hashimage(image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
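# hashimage fingerprints a PIL image by hashing its raw bytes, so tests can
# compare pipeline outputs without storing reference images.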
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Tuple , a : Optional[int] , a : Optional[int] )-> Tuple:
"""simple docstring"""
lowercase__ = DepthEstimationPipeline(model=_a , image_processor=_a )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str , a : Optional[Any] )-> str:
"""simple docstring"""
lowercase__ = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , _a )
import datasets
lowercase__ = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
lowercase__ = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , _a , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> str:
"""simple docstring"""
pass
@slow
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
lowercase__ = """Intel/dpt-large"""
lowercase__ = pipeline('depth-estimation' , model=_a )
lowercase__ = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
lowercase__ = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> int:
"""simple docstring"""
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
| 709
|
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
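    # different_colour_ways_number[n][c] tracks, independently for each tile
    # size c + 2 (red=2, green=3, blue=4), the number of ways to place at
    # least one such tile in a row of length n; the answer sums the three counts.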
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 45
| 0
|
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Tuple , a : List[str] , )-> Any:
"""simple docstring"""
lowercase__ = parent
lowercase__ = 13
lowercase__ = 7
lowercase__ = 30
lowercase__ = self.seq_length + self.mem_len
lowercase__ = 15
lowercase__ = True
lowercase__ = True
lowercase__ = 99
lowercase__ = [10, 50, 80]
lowercase__ = 32
lowercase__ = 32
lowercase__ = 4
lowercase__ = 8
lowercase__ = 128
lowercase__ = 2
lowercase__ = 2
lowercase__ = None
lowercase__ = 1
lowercase__ = 0
lowercase__ = 3
lowercase__ = self.vocab_size - 1
lowercase__ = 0.01
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Optional[int]:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
"""simple docstring"""
random.seed(self.seed )
tf.random.set_seed(self.seed )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : Optional[Any] , a : List[Any] , a : Dict , a : Optional[Any] )-> List[Any]:
"""simple docstring"""
lowercase__ = TFTransfoXLModel(UpperCamelCase__ )
lowercase__ = model(UpperCamelCase__ ).to_tuple()
lowercase__ = {'''input_ids''': input_ids_a, '''mems''': mems_a}
lowercase__ = model(UpperCamelCase__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : List[Any] , a : Tuple , a : Tuple , a : Any )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFTransfoXLLMHeadModel(UpperCamelCase__ )
lowercase__ = model(UpperCamelCase__ ).to_tuple()
lowercase__ = {'''input_ids''': input_ids_a, '''labels''': lm_labels}
lowercase__ = model(UpperCamelCase__ ).to_tuple()
lowercase__ = model([input_ids_a, mems_a] ).to_tuple()
lowercase__ = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
lowercase__ = model(UpperCamelCase__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def SCREAMING_SNAKE_CASE_ ( self : int , a : Optional[int] , a : List[Any] , a : Dict , a : Optional[int] )-> Optional[Any]:
"""simple docstring"""
lowercase__ = TFTransfoXLForSequenceClassification(UpperCamelCase__ )
lowercase__ = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> int:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
        (lowercase__ , lowercase__ , lowercase__ , lowercase__) = config_and_inputs
lowercase__ = {'''input_ids''': input_ids_a}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : Dict = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_UpperCamelCase : Union[str, Any] = () if is_tf_available() else ()
_UpperCamelCase : str = (
{
'feature-extraction': TFTransfoXLModel,
'text-classification': TFTransfoXLForSequenceClassification,
'text-generation': TFTransfoXLLMHeadModel,
'zero-shot': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : Dict = False
_UpperCamelCase : str = False
_UpperCamelCase : List[Any] = False
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : Union[str, Any] , a : Any , a : Tuple , a : int , a : List[str] )-> Dict:
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> str:
"""simple docstring"""
lowercase__ = TFTransfoXLModelTester(self )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase__ , d_embed=37 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
self.model_tester.set_seed()
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Any:
"""simple docstring"""
self.model_tester.set_seed()
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : str )-> List[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
lowercase__ = model.get_output_embeddings()
assert isinstance(UpperCamelCase__ , tf.keras.layers.Layer )
lowercase__ = model.get_bias()
assert name is None
else:
lowercase__ = model.get_output_embeddings()
assert x is None
lowercase__ = model.get_bias()
assert name is None
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
pass
@slow
def SCREAMING_SNAKE_CASE_ ( self : int )-> str:
"""simple docstring"""
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = TFTransfoXLModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.' )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Any:
"""simple docstring"""
pass
@require_tf
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
'''simple docstring'''
@unittest.skip('Skip test until #12651 is resolved.' )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> List[str]:
"""simple docstring"""
lowercase__ = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103' )
# fmt: off
        lowercase__ = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] , dtype=tf.int32 ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
lowercase__ = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
lowercase__ = model.generate(UpperCamelCase__ , max_length=200 , do_sample=UpperCamelCase__ )
self.assertListEqual(output_ids[0].numpy().tolist() , UpperCamelCase__ )
| 710
|
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
    def __init__( self : Optional[Any] , a : UNet1DModel , a : UNet1DModel , a : DDPMScheduler , a : Any , )-> Dict:
"""simple docstring"""
super().__init__()
lowercase__ = value_function
lowercase__ = unet
lowercase__ = scheduler
lowercase__ = env
lowercase__ = env.get_dataset()
lowercase__ = {}
for key in self.data.keys():
try:
lowercase__ = self.data[key].mean()
except: # noqa: E722
pass
lowercase__ = {}
for key in self.data.keys():
try:
lowercase__ = self.data[key].std()
except: # noqa: E722
pass
lowercase__ = env.observation_space.shape[0]
lowercase__ = env.action_space.shape[0]
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Any , a : int )-> Dict:
"""simple docstring"""
return (x_in - self.means[key]) / self.stds[key]
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str , a : List[str] )-> str:
"""simple docstring"""
return x_in * self.stds[key] + self.means[key]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : Tuple )-> Tuple:
"""simple docstring"""
if type(a ) is dict:
return {k: self.to_torch(a ) for k, v in x_in.items()}
elif torch.is_tensor(a ):
return x_in.to(self.unet.device )
return torch.tensor(a , device=self.unet.device )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Optional[int] , a : Dict , a : Optional[Any] )-> List[Any]:
"""simple docstring"""
for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()  # pin the conditioned state dims in place
return x_in
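    # Note (illustrative): reset_xa pins the conditioned timesteps of the
    # trajectory; with cond = {0: obs} only the state columns (beyond act_dim)
    # of the first timestep are overwritten, so every denoising step restarts
    # from the current observation.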
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[Any] , a : Any , a : Optional[Any] , a : Optional[int] )-> List[Any]:
"""simple docstring"""
lowercase__ = x.shape[0]
lowercase__ = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowercase__ = torch.full((batch_size,) , a , device=self.unet.device , dtype=torch.long )
for _ in range(a ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowercase__ = self.value_function(x.permute(0 , 2 , 1 ) , a ).sample
lowercase__ = torch.autograd.grad([y.sum()] , [x] )[0]
lowercase__ = self.scheduler._get_variance(a )
lowercase__ = torch.exp(0.5 * posterior_variance )
lowercase__ = model_std * grad
lowercase__ = 0
lowercase__ = x.detach()
lowercase__ = x + scale * grad
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.unet(x.permute(0 , 2 , 1 ) , a ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
lowercase__ = self.scheduler.step(a , a , a , predict_epsilon=a )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.to_torch(a )
return x, y
def __call__( self : Any , a : Tuple , a : int=64 , a : Tuple=32 , a : List[Any]=2 , a : List[str]=0.1 )-> List[Any]:
"""simple docstring"""
lowercase__ = self.normalize(a , 'observations' )
lowercase__ = obs[None].repeat(a , axis=0 )
lowercase__ = {0: self.to_torch(a )}
lowercase__ = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowercase__ = randn_tensor(a , device=self.unet.device )
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.to_torch(a )
# run the diffusion process
lowercase__ , lowercase__ = self.run_diffusion(a , a , a , a )
# sort output trajectories by value
lowercase__ = y.argsort(0 , descending=a ).squeeze()
lowercase__ = x[sorted_idx]
lowercase__ = sorted_values[:, :, : self.action_dim]
lowercase__ = actions.detach().cpu().numpy()
lowercase__ = self.de_normalize(a , key='actions' )
# select the action with the highest value
if y is not None:
lowercase__ = 0
else:
# if we didn't run value guiding, select a random action
lowercase__ = np.random.randint(0 , a )
lowercase__ = denorm_actions[selected_index, 0]
return denorm_actions
| 45
| 0
|
import os
from datetime import datetime as dt
from github import Github
lowercase_ = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def __UpperCamelCase () -> Optional[Any]:
lowercase__ = Github(os.environ['GITHUB_TOKEN'] )
lowercase__ = g.get_repo('huggingface/accelerate' )
lowercase__ = repo.get_issues(state='open' )
for issue in open_issues:
        lowercase__ = sorted(issue.get_comments() , key=lambda comment : comment.created_at , reverse=True )
        lowercase__ = comments[0] if len(comments ) > 0 else None
lowercase__ = dt.utcnow()
lowercase__ = (current_time - issue.updated_at).days
lowercase__ = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 711
|
from PIL import Image
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Image:
def brightness(_SCREAMING_SNAKE_CASE ) -> float:
return 128 + level + (c - 128)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
    return img.point(brightness )
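# Note (illustrative only): brightness(c) algebraically reduces to c + level,
# i.e. a uniform shift of every channel value, so with level=100 a pixel value
# of 50 maps to 150 before PIL's lookup-table conversion.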
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
lowercase_ = change_brightness(img, 100)
brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 45
| 0
|
import tensorflow as tf
from ...tf_utils import shape_list
class SCREAMING_SNAKE_CASE (tf.keras.layers.Layer ):
def __init__( self : Any , a : List[str] , a : Union[str, Any] , a : int , a : int , a : str=1 , a : int=False , **a : Dict )-> Any:
"""simple docstring"""
super().__init__(**a )
lowercase__ = vocab_size
lowercase__ = d_embed
lowercase__ = d_proj
lowercase__ = cutoffs + [vocab_size]
lowercase__ = [0] + self.cutoffs
lowercase__ = div_val
lowercase__ = self.cutoffs[0]
lowercase__ = len(self.cutoffs ) - 1
lowercase__ = self.shortlist_size + self.n_clusters
lowercase__ = keep_order
lowercase__ = []
lowercase__ = []
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : str )-> List[str]:
"""simple docstring"""
if self.n_clusters > 0:
lowercase__ = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='zeros' , trainable=a , name='cluster_weight' )
lowercase__ = self.add_weight(
shape=(self.n_clusters,) , initializer='zeros' , trainable=a , name='cluster_bias' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
lowercase__ = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='zeros' , trainable=a , name=f"""out_projs_._{i}""" , )
self.out_projs.append(a )
else:
self.out_projs.append(a )
lowercase__ = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='zeros' , trainable=a , name=f"""out_layers_._{i}_._weight""" , )
lowercase__ = self.add_weight(
shape=(self.vocab_size,) , initializer='zeros' , trainable=a , name=f"""out_layers_._{i}_._bias""" , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
                lowercase__ , lowercase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase__ = self.d_embed // (self.div_val**i)
lowercase__ = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='zeros' , trainable=a , name=f"""out_projs_._{i}""" )
self.out_projs.append(a )
lowercase__ = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='zeros' , trainable=a , name=f"""out_layers_._{i}_._weight""" , )
lowercase__ = self.add_weight(
shape=(r_idx - l_idx,) , initializer='zeros' , trainable=a , name=f"""out_layers_._{i}_._bias""" , )
self.out_layers.append((weight, bias) )
super().build(a )
@staticmethod
def SCREAMING_SNAKE_CASE_ ( a : List[str] , a : str , a : str , a : Optional[Any]=None )-> str:
"""simple docstring"""
lowercase__ = x
if proj is not None:
lowercase__ = tf.einsum('ibd,ed->ibe' , a , a )
return tf.einsum('ibd,nd->ibn' , a , a ) + b
@staticmethod
def SCREAMING_SNAKE_CASE_ ( a : Union[str, Any] , a : List[str] )-> Optional[Any]:
"""simple docstring"""
lowercase__ = shape_list(a )
lowercase__ = tf.range(lp_size[0] , dtype=target.dtype )
lowercase__ = tf.stack([r, target] , 1 )
return tf.gather_nd(a , a )
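    # _gather_logprob selects, for each row b, the log-probability of its
    # target token: gathering over stacked (row_index, target_id) pairs is
    # TF's equivalent of NumPy fancy indexing logprob[np.arange(n), target].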
def SCREAMING_SNAKE_CASE_ ( self : int , a : str , a : Tuple , a : List[str]=True , a : List[str]=False )-> str:
"""simple docstring"""
lowercase__ = 0
if self.n_clusters == 0:
lowercase__ = self._logit(a , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
lowercase__ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=a , logits=a )
lowercase__ = tf.nn.log_softmax(a , axis=-1 )
else:
lowercase__ = shape_list(a )
lowercase__ = []
lowercase__ = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
                lowercase__ , lowercase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
lowercase__ = (target >= l_idx) & (target < r_idx)
lowercase__ = tf.where(a )
lowercase__ = tf.boolean_mask(a , a ) - l_idx
if self.div_val == 1:
lowercase__ = self.out_layers[0][0][l_idx:r_idx]
lowercase__ = self.out_layers[0][1][l_idx:r_idx]
else:
lowercase__ = self.out_layers[i][0]
lowercase__ = self.out_layers[i][1]
if i == 0:
lowercase__ = tf.concat([cur_W, self.cluster_weight] , 0 )
lowercase__ = tf.concat([cur_b, self.cluster_bias] , 0 )
lowercase__ = self._logit(a , a , a , self.out_projs[0] )
lowercase__ = tf.nn.log_softmax(a )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
lowercase__ = tf.boolean_mask(a , a )
lowercase__ = self._gather_logprob(a , a )
else:
lowercase__ = self._logit(a , a , a , self.out_projs[i] )
lowercase__ = tf.nn.log_softmax(a )
lowercase__ = self.cutoffs[0] + i - 1 # No probability for the head cluster
lowercase__ = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(a )
if target is not None:
lowercase__ = tf.boolean_mask(a , a )
lowercase__ = tf.boolean_mask(a , a )
lowercase__ = self._gather_logprob(a , a )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(a , -cur_logprob , shape_list(a ) )
lowercase__ = tf.concat(a , axis=-1 )
if target is not None:
if return_mean:
lowercase__ = tf.reduce_mean(a )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(a )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(a , name=self.name , aggregation='mean' if return_mean else '' )
return out
| 712
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def __init__( self : Any , a : str , a : List[Any]=7 , a : int=3 , a : int=18 , a : Optional[Any]=30 , a : Optional[int]=400 , a : int=True , a : Tuple=None , a : Optional[Any]=True , a : str=False , a : str=True , a : int=True , a : Tuple=[0.5, 0.5, 0.5] , a : Any=[0.5, 0.5, 0.5] , )-> Optional[int]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size if size is not None else {'height': 18, 'width': 20}
lowercase__ = do_thumbnail
lowercase__ = do_align_axis
lowercase__ = do_pad
lowercase__ = do_normalize
lowercase__ = image_mean
lowercase__ = image_std
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = DonutImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : int )-> List[Any]:
"""simple docstring"""
lowercase__ = DonutImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Any )-> int:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_thumbnail' ) )
self.assertTrue(hasattr(a , 'do_align_long_axis' ) )
self.assertTrue(hasattr(a , 'do_pad' ) )
self.assertTrue(hasattr(a , 'do_normalize' ) )
self.assertTrue(hasattr(a , 'image_mean' ) )
self.assertTrue(hasattr(a , 'image_std' ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
"""simple docstring"""
pass
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 45
| 0
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
lowercase__ = 1.5
lowercase__ = int(factor * num_class_images )
lowercase__ = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=a__ , aesthetic_weight=0.1 )
os.makedirs(F"""{class_data_dir}/images""" , exist_ok=a__ )
if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
lowercase__ = client.query(text=a__ )
if len(a__ ) >= factor * num_class_images or num_images > 1E4:
break
else:
lowercase__ = int(factor * num_images )
lowercase__ = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=a__ , aesthetic_weight=0.1 , )
lowercase__ = 0
lowercase__ = 0
lowercase__ = tqdm(desc='downloading real regularization images' , total=a__ )
with open(F"""{class_data_dir}/caption.txt""" , 'w' ) as fa, open(F"""{class_data_dir}/urls.txt""" , 'w' ) as fa, open(
F"""{class_data_dir}/images.txt""" , 'w' ) as fa:
while total < num_class_images:
lowercase__ = class_images[count]
count += 1
try:
lowercase__ = requests.get(images['url'] )
if img.status_code == 200:
lowercase__ = Image.open(BytesIO(img.content ) )
with open(F"""{class_data_dir}/images/{total}.jpg""" , 'wb' ) as f:
f.write(img.content )
fa.write(images['caption'] + '\n' )
fa.write(images['url'] + '\n' )
fa.write(F"""{class_data_dir}/images/{total}.jpg""" + '\n' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
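# The 1.5x oversampling factor above is presumably a buffer: some retrieved
# URLs fail to download (caught by the try/except), so more candidates than
# num_class_images are requested to ensure enough images survive the loop.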
def __UpperCamelCase () -> Dict:
lowercase__ = argparse.ArgumentParser('' , add_help=a__ )
parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=a__ , type=a__ )
parser.add_argument('--class_data_dir' , help='path to save images' , required=a__ , type=a__ )
parser.add_argument('--num_class_images' , help='number of images to download' , default=200 , type=a__ )
return parser.parse_args()
if __name__ == "__main__":
lowercase_ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 713
|
import math
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
if 0 not in (x, y):
# We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError('This should never happen' )
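# Worked example of the relation above (illustrative only): comparing 2**100
# with 10**30 needs no big powers, since res(2, 100) = 100 * log10(2) ≈ 30.1
# exceeds res(10, 30) = 30, so 2**100 is the larger number.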
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
lowercase_ = """Enter the base and the power separated by a comma: """
lowercase_ , lowercase_ = map(int, input(prompt).split(""","""))
lowercase_ , lowercase_ = map(int, input(prompt).split(""","""))
# We find the log of each number, using the function res(), which takes two
# arguments.
lowercase_ = res(xa, ya)
lowercase_ = res(xa, ya)
# We check for the largest number
if resa > resa:
print("""Largest number is""", xa, """^""", ya)
elif resa > resa:
print("""Largest number is""", xa, """^""", ya)
else:
print("""Both are equal""")
| 45
| 0
|
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Dict:
lowercase__ = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
F"""{test_file} instead.""" )
lowercase__ = components[-1]
if not test_fn.endswith('py' ):
raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""" )
if not test_fn.startswith('test_modeling_' ):
raise ValueError(
F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
lowercase__ = components[:-1] + [test_fn.replace('.py' , '' )]
lowercase__ = '.'.join(UpperCamelCase__ )
return test_module_path
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
lowercase__ = get_module_path(UpperCamelCase__ )
lowercase__ = importlib.import_module(UpperCamelCase__ )
return test_module
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[Any]:
lowercase__ = []
lowercase__ = get_test_module(UpperCamelCase__ )
for attr in dir(UpperCamelCase__ ):
if attr.endswith('ModelTester' ):
tester_classes.append(getattr(UpperCamelCase__ , UpperCamelCase__ ) )
# sort with class names
    return sorted(UpperCamelCase__ , key=lambda x : x.__name__ )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
lowercase__ = []
lowercase__ = get_test_module(UpperCamelCase__ )
for attr in dir(UpperCamelCase__ ):
lowercase__ = getattr(UpperCamelCase__ , UpperCamelCase__ )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
lowercase__ = getattr(UpperCamelCase__ , 'all_model_classes' , [] )
if len(UpperCamelCase__ ) > 0:
test_classes.append(UpperCamelCase__ )
# sort with class names
    return sorted(UpperCamelCase__ , key=lambda x : x.__name__ )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[str]:
lowercase__ = get_test_classes(UpperCamelCase__ )
lowercase__ = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
    return sorted(UpperCamelCase__ , key=lambda x : x.__name__ )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
lowercase__ = test_class()
if hasattr(UpperCamelCase__ , 'setUp' ):
test.setUp()
lowercase__ = None
if hasattr(UpperCamelCase__ , 'model_tester' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
lowercase__ = test.model_tester.__class__
return model_tester
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
lowercase__ = get_test_classes(UpperCamelCase__ )
lowercase__ = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(UpperCamelCase__ )
# sort with class names
    return sorted(UpperCamelCase__ , key=lambda x : x.__name__ )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
lowercase__ = get_test_classes_for_model(UpperCamelCase__ , UpperCamelCase__ )
lowercase__ = []
for test_class in test_classes:
lowercase__ = get_model_tester_from_test_class(UpperCamelCase__ )
if tester_class is not None:
tester_classes.append(UpperCamelCase__ )
# sort with class names
    return sorted(UpperCamelCase__ , key=lambda x : x.__name__ )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[str]:
lowercase__ = get_test_classes(UpperCamelCase__ )
lowercase__ = {test_class: get_model_tester_from_test_class(UpperCamelCase__ ) for test_class in test_classes}
return test_tester_mapping
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Any:
lowercase__ = get_model_classes(UpperCamelCase__ )
lowercase__ = {
model_class: get_test_classes_for_model(UpperCamelCase__ , UpperCamelCase__ ) for model_class in model_classes
}
return model_test_mapping
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
lowercase__ = get_model_classes(UpperCamelCase__ )
lowercase__ = {
model_class: get_tester_classes_for_model(UpperCamelCase__ , UpperCamelCase__ ) for model_class in model_classes
}
return model_to_tester_mapping
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
    if isinstance(o , str ):
        return o
    elif isinstance(o , type ):
        return o.__name__
    elif isinstance(o , (list, tuple) ):
        return [to_json(x ) for x in o]
    elif isinstance(o , dict ):
        return {to_json(k ): to_json(v ) for k, v in o.items()}
else:
return o
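# Illustrative use of the helpers above (the test path is hypothetical):
# get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
# yields a dict like {BertModel: [BertModelTester], ...}, and to_json()
# converts such class-valued structures into plain strings for logging.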
| 714
|
class SCREAMING_SNAKE_CASE : # Public class to implement a graph
def __init__( self : int , a : int , a : int , a : list[list[bool]] )-> None:
"""simple docstring"""
lowercase__ = row
lowercase__ = col
lowercase__ = graph
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : int , a : int , a : list[list[bool]] )-> bool:
"""simple docstring"""
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : int , a : int , a : list[list[bool]] )-> None:
"""simple docstring"""
lowercase__ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
lowercase__ = [-1, 0, 1, -1, 1, -1, 0, 1]
lowercase__ = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , a ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , a )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> int: # And finally, count all islands.
"""simple docstring"""
lowercase__ = [[False for j in range(self.COL )] for i in range(self.ROW )]
lowercase__ = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(a , a , a )
count += 1
return count
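# Example of the 8-directional connectivity above (illustrative): in the grid
# [[1, 0], [0, 1]] the two set cells are diagonal neighbours, so the island
# counter reports a single island rather than two.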
| 45
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 715
|
from string import ascii_uppercase
lowercase_ = {str(ord(c) - 55): c for c in ascii_uppercase}
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
    if isinstance(num , float ):
        raise TypeError('int() can\'t convert non-string with explicit base' )
    if num < 0:
        raise ValueError('parameter must be positive int' )
    if isinstance(base , str ):
        raise TypeError('\'str\' object cannot be interpreted as an integer' )
    if isinstance(base , float ):
        raise TypeError('\'float\' object cannot be interpreted as an integer' )
if base in (0, 1):
raise ValueError('base must be >= 2' )
if base > 36:
raise ValueError('base must be <= 36' )
lowercase__ = ''
lowercase__ = 0
lowercase__ = 0
while div != 1:
lowercase__ , lowercase__ = divmod(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if base >= 11 and 9 < mod < 36:
lowercase__ = ALPHABET_VALUES[str(_SCREAMING_SNAKE_CASE )]
else:
lowercase__ = str(_SCREAMING_SNAKE_CASE )
new_value += actual_value
lowercase__ = num // base
lowercase__ = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
new_value += str(_SCREAMING_SNAKE_CASE )
return str(new_value[::-1] )
return new_value[::-1]
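# Worked example (illustrative only): for num=255, base=16 the loop runs
# divmod(255, 16) -> (15, 15) and divmod(15, 16) -> (0, 15); both remainders
# map to "F" through ALPHABET_VALUES, and the reversed digits give "FF".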
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 45
| 0
|
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowercase_ = logging.get_logger("""transformers.models.encodec""")
lowercase_ = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
lowercase_ = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
lowercase_ = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
lowercase_ = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
lowercase_ = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
lowercase_ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
lowercase_ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
lowercase_ = []
lowercase_ = []
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
for attribute in key.split('.' ):
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if weight_type is not None:
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
else:
lowercase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowercase__ = value
elif weight_type == "weight_g":
lowercase__ = value
elif weight_type == "weight_v":
lowercase__ = value
elif weight_type == "bias":
lowercase__ = value
elif weight_type == "running_mean":
lowercase__ = value
elif weight_type == "running_var":
lowercase__ = value
elif weight_type == "num_batches_tracked":
lowercase__ = value
elif weight_type == "weight_ih_l0":
lowercase__ = value
elif weight_type == "weight_hh_l0":
lowercase__ = value
elif weight_type == "bias_ih_l0":
lowercase__ = value
elif weight_type == "bias_hh_l0":
lowercase__ = value
elif weight_type == "weight_ih_l1":
lowercase__ = value
elif weight_type == "weight_hh_l1":
lowercase__ = value
elif weight_type == "bias_ih_l1":
lowercase__ = value
elif weight_type == "bias_hh_l1":
lowercase__ = value
else:
lowercase__ = value
logger.info(F"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
lowercase__ = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
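# Examples of the matching rules above (keys are illustrative): an ignore key
# "encoder.*" matches "encoder.model.0.conv.conv" via the startswith branch,
# while "quantizer.*._codebook" matches any name containing both "quantizer"
# and "_codebook" through the ".*." prefix/suffix split.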
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
lowercase__ = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
lowercase__ = MAPPING_24K
elif model_name == "encodec_48khz":
lowercase__ = MAPPING_48K
else:
raise ValueError(F"""Unsupported model: {model_name}""" )
for name, value in orig_dict.items():
if should_ignore(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
logger.info(F"""{name} was ignored""" )
continue
lowercase__ = False
for key, mapped_key in MAPPING.items():
if "*" in key:
lowercase__ = key.split('.*.' )
if prefix in name and suffix in name:
lowercase__ = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
lowercase__ = True
if "*" in mapped_key:
                    lowercase__ = name.split(key )[0].split('.' )[-2]
                    lowercase__ = mapped_key.replace('*' , layer_index )
if "weight_g" in name:
lowercase__ = 'weight_g'
elif "weight_v" in name:
lowercase__ = 'weight_v'
elif "weight_ih_l0" in name:
lowercase__ = 'weight_ih_l0'
elif "weight_hh_l0" in name:
lowercase__ = 'weight_hh_l0'
elif "bias_ih_l0" in name:
lowercase__ = 'bias_ih_l0'
elif "bias_hh_l0" in name:
lowercase__ = 'bias_hh_l0'
elif "weight_ih_l1" in name:
lowercase__ = 'weight_ih_l1'
elif "weight_hh_l1" in name:
lowercase__ = 'weight_hh_l1'
elif "bias_ih_l1" in name:
lowercase__ = 'bias_ih_l1'
elif "bias_hh_l1" in name:
lowercase__ = 'bias_hh_l1'
elif "bias" in name:
lowercase__ = 'bias'
elif "weight" in name:
lowercase__ = 'weight'
elif "running_mean" in name:
lowercase__ = 'running_mean'
elif "running_var" in name:
lowercase__ = 'running_var'
elif "num_batches_tracked" in name:
lowercase__ = 'num_batches_tracked'
else:
lowercase__ = None
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(F"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> List[Any]:
if config_path is not None:
lowercase__ = EncodecConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
lowercase__ = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
lowercase__ = [8, 5, 4, 4]
lowercase__ = [2.2]
lowercase__ = 64
lowercase__ = 32000
lowercase__ = 2048
lowercase__ = False
lowercase__ = False
lowercase__ = False
elif model_name == "encodec_48khz":
lowercase__ = [8, 5, 4, 2]
lowercase__ = [3.0, 6.0, 1_2.0, 2_4.0]
lowercase__ = 48000
lowercase__ = 2
lowercase__ = False
lowercase__ = 'time_group_norm'
lowercase__ = True
lowercase__ = 1.0
lowercase__ = 0.0_1
else:
raise ValueError(F"""Unknown model name: {model_name}""" )
lowercase__ = EncodecModel(_SCREAMING_SNAKE_CASE )
lowercase__ = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
lowercase__ = torch.load(_SCREAMING_SNAKE_CASE )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
lowercase__ = original_checkpoint['best_state']
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(_SCREAMING_SNAKE_CASE )
model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
lowercase_ = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 716
|
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , a : Any , a : Optional[int]=13 , a : Tuple=30 , a : Union[str, Any]=2 , a : List[str]=3 , a : Dict=True , a : List[str]=True , a : List[Any]=32 , a : List[str]=5 , a : Optional[int]=4 , a : List[str]=37 , a : Dict="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : int=10 , a : List[str]=0.02 , a : int=None , a : List[str]=2 , )-> Dict:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = scope
lowercase__ = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = num_patches + 1
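        # e.g. with the defaults above (image_size=30, patch_size=2): (30 // 2) ** 2 = 225 patches, so seq_length = 226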
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[Any]:
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : List[Any] , a : List[str] , a : Dict )-> Optional[Any]:
"""simple docstring"""
lowercase__ = ViTModel(config=a )
model.to(a )
model.eval()
lowercase__ = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : int , a : Optional[Any] , a : int , a : Tuple )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = ViTForMaskedImageModeling(config=a )
model.to(a )
model.eval()
lowercase__ = model(a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = ViTForMaskedImageModeling(a )
model.to(a )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : List[str] , a : int , a : List[Any] )-> str:
"""simple docstring"""
lowercase__ = self.type_sequence_label_size
lowercase__ = ViTForImageClassification(a )
model.to(a )
model.eval()
lowercase__ = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = ViTForImageClassification(a )
model.to(a )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Any = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Union[str, Any] = (
{'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification}
if is_torch_available()
else {}
)
_UpperCamelCase : int = True
_UpperCamelCase : int = False
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : Dict = False
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
"""simple docstring"""
lowercase__ = ViTModelTester(self )
        lowercase__ = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Optional[int]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(a )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Union[str, Any]:
"""simple docstring"""
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple:
        """simple docstring"""
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> int:
        """simple docstring"""
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[Any]:
"""simple docstring"""
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__ = ViTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def __UpperCamelCase () -> str:
lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
        lowercase__ = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(torch_device )
        lowercase__ = self.default_image_processor
        lowercase__ = prepare_img()
        lowercase__ = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            lowercase__ = model(**inputs )
        # verify the logits
        lowercase__ = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        lowercase__ = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : int )-> List[str]:
"""simple docstring"""
        lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' ).to(torch_device )
        lowercase__ = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=480 )
        lowercase__ = prepare_img()
        lowercase__ = image_processor(images=image , return_tensors='pt' )
        lowercase__ = inputs.pixel_values.to(torch_device )
        # forward pass
        with torch.no_grad():
            lowercase__ = model(pixel_values , interpolate_pos_encoding=True )
        # verify the last hidden states
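        # With interpolate_pos_encoding=True the pretrained 224px position grid is resized to the
        # 480px input: ViT-S/8 yields (480 // 8) ** 2 = 3600 patches, plus the [CLS] token -> 3601 positions.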
        lowercase__ = torch.Size((1, 3_601, 384) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        lowercase__ = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : str )-> str:
"""simple docstring"""
        lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.float16 , device_map='auto' )
        lowercase__ = self.default_image_processor
        lowercase__ = prepare_img()
        lowercase__ = image_processor(images=image , return_tensors='pt' )
        lowercase__ = inputs.pixel_values.to(torch_device )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            lowercase__ = model(pixel_values )
| 45
| 0
|
class SCREAMING_SNAKE_CASE :
def __init__( self : Any , a : List[Any] )-> Any:
"""simple docstring"""
lowercase__ = val
lowercase__ = None
lowercase__ = None
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[Any] )-> str:
        """simple docstring"""
        if self.val:
            if a < self.val:
                if self.left is None:
                    lowercase__ = Node(a )
                else:
                    self.left.insert(a )
            elif a > self.val:
                if self.right is None:
                    lowercase__ = Node(a )
                else:
                    self.right.insert(a )
        else:
            lowercase__ = a
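# Tree sort: every element is inserted into a binary search tree, and an in-order
# traversal then yields the elements in ascending order.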
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
if root:
        inorder(root.left , _SCREAMING_SNAKE_CASE )
        res.append(root.val )
        inorder(root.right , _SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Any:
    if len(_SCREAMING_SNAKE_CASE ) == 0:
        return arr
    lowercase__ = Node(arr[0] )
    for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    lowercase__ = []
    inorder(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    return res
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 717
|
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[Any]:
stooge(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) - 1 )
return arr
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
lowercase__ , lowercase__ = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
lowercase__ = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (h - t) )
# Recursively sort last 2/3 elements
stooge(_SCREAMING_SNAKE_CASE , i + t , (_SCREAMING_SNAKE_CASE) )
# Recursively sort first 2/3 elements
stooge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (h - t) )
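# Note: stooge sort runs in O(n^(log 3 / log 1.5)) ~ O(n^2.71) time, so it is of
# pedagogical interest only.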
if __name__ == "__main__":
lowercase_ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase_ = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
| 45
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
lowercase__ = tempfile.mkdtemp()
lowercase__ = BlipImageProcessor()
lowercase__ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel' )
lowercase__ = BlipProcessor(_lowercase , _lowercase )
processor.save_pretrained(self.tmpdirname )
    def SCREAMING_SNAKE_CASE_ ( self : Any , **a : Optional[int] )-> Any:
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname , **a ).tokenizer
    def SCREAMING_SNAKE_CASE_ ( self : Tuple , **a : Tuple )-> Optional[Any]:
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname , **a ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> int:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Union[str, Any]:
"""simple docstring"""
        lowercase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        lowercase__ = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> int:
"""simple docstring"""
lowercase__ = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowercase__ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 )
lowercase__ = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowercase )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = BlipProcessor(tokenizer=_lowercase , image_processor=_lowercase )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(_lowercase , return_tensors='np' )
lowercase__ = processor(images=_lowercase , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def SCREAMING_SNAKE_CASE_ ( self : str )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = BlipProcessor(tokenizer=_lowercase , image_processor=_lowercase )
lowercase__ = """lower newer"""
lowercase__ = processor(text=_lowercase )
lowercase__ = tokenizer(_lowercase , return_token_type_ids=_lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = BlipProcessor(tokenizer=_lowercase , image_processor=_lowercase )
lowercase__ = """lower newer"""
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
# test if it raises when no input is passed
with pytest.raises(_lowercase ):
processor()
def SCREAMING_SNAKE_CASE_ ( self : int )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = BlipProcessor(tokenizer=_lowercase , image_processor=_lowercase )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.batch_decode(_lowercase )
lowercase__ = tokenizer.batch_decode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[str]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = BlipProcessor(tokenizer=_lowercase , image_processor=_lowercase )
lowercase__ = """lower newer"""
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=_lowercase , images=_lowercase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
| 718
|
from scipy.stats import spearmanr
import datasets
lowercase_ = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
lowercase_ = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
lowercase_ = R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE (datasets.Metric ):
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
def SCREAMING_SNAKE_CASE_ ( self : int , a : str , a : Any , a : str=False )-> Optional[int]:
"""simple docstring"""
        lowercase__ = spearmanr(references , predictions )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 45
| 0
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowercase_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Any = ['''audio_values''', '''audio_mask''']
def __init__( self : List[Any] , a : Tuple=2_048 , a : List[str]=1 , a : int=[16, 16] , a : Optional[int]=128 , a : Dict=44_100 , a : Dict=86 , a : Union[str, Any]=2_048 , a : int=0.0 , **a : Optional[Any] , )-> List[Any]:
"""simple docstring"""
super().__init__(
feature_size=A__ , sampling_rate=A__ , padding_value=A__ , **A__ , )
lowercase__ = spectrogram_length
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = feature_size // self.patch_size[1]
lowercase__ = n_fft
lowercase__ = sampling_rate // hop_length_to_sampling_rate
lowercase__ = sampling_rate
lowercase__ = padding_value
lowercase__ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A__ , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=A__ , norm='slaney' , mel_scale='slaney' , ).T
def SCREAMING_SNAKE_CASE_ ( self : Any , a : int )-> Tuple:
"""simple docstring"""
lowercase__ = spectrogram(
A__ , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
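        # The last frame is dropped, then the dB-valued spectrogram is shifted and
        # rescaled so that the usable 80 dB dynamic range maps (roughly) into [-1.0, 1.0].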
lowercase__ = log_spec[:, :-1]
lowercase__ = log_spec - 20.0
lowercase__ = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : Optional[int] , a : Any , a : str = None , a : int = True , a : Optional[int] = None , a : Union[str, Any] = False , a : Dict = False , **a : Optional[int] , )-> Any:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
f""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowercase__ = isinstance(A__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
lowercase__ = is_batched_numpy or (
isinstance(A__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
            lowercase__ = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(A__ , np.ndarray ):
            lowercase__ = np.asarray(A__ , dtype=np.float32 )
        elif isinstance(A__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            lowercase__ = raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
lowercase__ = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowercase__ = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , A__ ):
            lowercase__ = [np.asarray(A__ , dtype=np.float32 ) for feature in audio_features]
# Create audio attention mask
lowercase__ = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowercase__ = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
            lowercase__ = np.array(A__ ).astype(np.float32 )
# convert into correct format for padding
lowercase__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
        lowercase__ = np.ones([len(A__ ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
lowercase__ = padded_audio_features * self.padding_value
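        # The canvas is pre-filled with `padding_value`; real features are copied in below,
        # leaving unused positions at the padding value.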
for i in range(len(A__ ) ):
lowercase__ = audio_features[i]
lowercase__ = feature
# return as BatchFeature
if return_attention_mask:
lowercase__ = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
lowercase__ = {"""audio_values""": padded_audio_features}
lowercase__ = BatchFeature(data=A__ , tensor_type=A__ )
return encoded_inputs
| 719
|
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 50 ) -> int:
lowercase__ = [1] * (length + 1)
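    # ways_number[n] counts the fillings of a row of length n (cf. Project Euler 114):
    # every block of length >= 3 placed at every possible offset leaves an independently
    # fillable remainder (separated by one empty square), and each block length also
    # contributes the single arrangement that ends flush with the row.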
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 45
| 0
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
lowercase_ = logging.get_logger(__name__)
lowercase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
lowercase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SCREAMING_SNAKE_CASE :
_UpperCamelCase : str = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Model type selected in the list: ' + ', '.join(__SCREAMING_SNAKE_CASE )} )
_UpperCamelCase : str = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
_UpperCamelCase : int = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_UpperCamelCase : int = field(
default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
_UpperCamelCase : int = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
_UpperCamelCase : int = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
_UpperCamelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
_UpperCamelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
_UpperCamelCase : float = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
_UpperCamelCase : int = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
_UpperCamelCase : int = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
_UpperCamelCase : int = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class SCREAMING_SNAKE_CASE (__SCREAMING_SNAKE_CASE ):
_UpperCamelCase : Union[str, Any] = 'train'
_UpperCamelCase : List[Any] = 'dev'
class SCREAMING_SNAKE_CASE (__SCREAMING_SNAKE_CASE ):
_UpperCamelCase : SquadDataTrainingArguments
_UpperCamelCase : List[SquadFeatures]
_UpperCamelCase : Split
_UpperCamelCase : bool
def __init__( self : Dict , a : Union[str, Any] , a : int , a : Any = None , a : int = Split.train , a : Optional[Any] = False , a : Optional[Any] = None , a : Any = "pt" , )-> Any:
"""simple docstring"""
lowercase__ = args
lowercase__ = is_language_sensitive
        lowercase__ = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode , str ):
try:
lowercase__ = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
lowercase__ = mode
# Load data features from cache or dataset file
lowercase__ = 'v2' if args.version_2_with_negative else 'v1'
lowercase__ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowercase__ = cached_features_file + '.lock'
with FileLock(_a ):
if os.path.exists(_a ) and not args.overwrite_cache:
lowercase__ = time.time()
lowercase__ = torch.load(_a )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowercase__ = self.old_features['features']
lowercase__ = self.old_features.get('dataset' , _a )
lowercase__ = self.old_features.get('examples' , _a )
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"""
' future run' )
else:
if mode == Split.dev:
lowercase__ = self.processor.get_dev_examples(args.data_dir )
else:
lowercase__ = self.processor.get_train_examples(args.data_dir )
lowercase__ , lowercase__ = squad_convert_examples_to_features(
examples=self.examples , tokenizer=_a , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=_a , )
lowercase__ = time.time()
torch.save(
{'features': self.features, 'dataset': self.dataset, 'examples': self.examples} , _a , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
return len(self.features )
def __getitem__( self : int , a : List[Any] )-> Any:
"""simple docstring"""
lowercase__ = self.features[i]
lowercase__ = torch.tensor(feature.input_ids , dtype=torch.long )
lowercase__ = torch.tensor(feature.attention_mask , dtype=torch.long )
lowercase__ = torch.tensor(feature.token_type_ids , dtype=torch.long )
lowercase__ = torch.tensor(feature.cls_index , dtype=torch.long )
lowercase__ = torch.tensor(feature.p_mask , dtype=torch.float )
lowercase__ = torch.tensor(feature.is_impossible , dtype=torch.float )
lowercase__ = {
'input_ids': input_ids,
'attention_mask': attention_mask,
'token_type_ids': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'is_impossible': is_impossible} )
if self.is_language_sensitive:
            inputs.update({'langs': (torch.ones(input_ids.shape , dtype=torch.int64 ) * self.args.lang_id)} )
if self.mode == Split.train:
lowercase__ = torch.tensor(feature.start_position , dtype=torch.long )
lowercase__ = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )
return inputs
| 720
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
    """b0""": efficientnet.EfficientNetB0,
    """b1""": efficientnet.EfficientNetB1,
    """b2""": efficientnet.EfficientNetB2,
    """b3""": efficientnet.EfficientNetB3,
    """b4""": efficientnet.EfficientNetB4,
    """b5""": efficientnet.EfficientNetB5,
    """b6""": efficientnet.EfficientNetB6,
    """b7""": efficientnet.EfficientNetB7,
}
lowercase_ = {
"""b0""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1_408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1_536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1_792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2_304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2_560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
lowercase__ = EfficientNetConfig()
lowercase__ = CONFIG_MAP[model_name]['hidden_dim']
lowercase__ = CONFIG_MAP[model_name]['width_coef']
lowercase__ = CONFIG_MAP[model_name]['depth_coef']
lowercase__ = CONFIG_MAP[model_name]['image_size']
lowercase__ = CONFIG_MAP[model_name]['dropout_rate']
lowercase__ = CONFIG_MAP[model_name]['dw_padding']
lowercase__ = 'huggingface/label-files'
lowercase__ = 'imagenet-1k-id2label.json'
lowercase__ = 1000
lowercase__ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase__ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
return config
def __UpperCamelCase () -> Tuple:
lowercase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase__ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
lowercase__ = CONFIG_MAP[model_name]['image_size']
lowercase__ = EfficientNetImageProcessor(
        size={'height': size, 'width': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=_SCREAMING_SNAKE_CASE , )
return preprocessor
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Tuple:
lowercase__ = [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )]
lowercase__ = sorted(set(_SCREAMING_SNAKE_CASE ) )
lowercase__ = len(_SCREAMING_SNAKE_CASE )
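    # Map the original TF block name fragments onto consecutive HF encoder block indices "0", "1", ...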
    lowercase__ = {b: str(i) for b, i in zip(_SCREAMING_SNAKE_CASE , range(_SCREAMING_SNAKE_CASE ) )}
lowercase__ = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
lowercase__ = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
lowercase__ = {}
for item in rename_keys:
if item[0] in original_param_names:
lowercase__ = 'efficientnet.' + item[1]
lowercase__ = 'classifier.weight'
lowercase__ = 'classifier.bias'
return key_mapping
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
for key, value in tf_params.items():
if "normalization" in key:
continue
lowercase__ = key_mapping[key]
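        # TF stores conv kernels as (H, W, in, out) and depthwise kernels as (H, W, in, multiplier);
        # permute them into PyTorch's (out, in, H, W) / (in, multiplier, H, W) layouts.
        # Plain dense kernels only need a transpose.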
if "_conv" in key and "kernel" in key:
lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
lowercase__ = torch.from_numpy(np.transpose(_SCREAMING_SNAKE_CASE ) )
else:
lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
lowercase__ = model_classes[model_name](
include_top=_SCREAMING_SNAKE_CASE , weights='imagenet' , input_tensor=_SCREAMING_SNAKE_CASE , input_shape=_SCREAMING_SNAKE_CASE , pooling=_SCREAMING_SNAKE_CASE , classes=1000 , classifier_activation='softmax' , )
lowercase__ = original_model.trainable_variables
lowercase__ = original_model.non_trainable_variables
lowercase__ = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
lowercase__ = param.numpy()
lowercase__ = list(tf_params.keys() )
# Load HuggingFace model
lowercase__ = get_efficientnet_config(_SCREAMING_SNAKE_CASE )
lowercase__ = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
lowercase__ = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print('Converting parameters...' )
lowercase__ = rename_keys(_SCREAMING_SNAKE_CASE )
replace_params(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Initialize preprocessor and preprocess input image
lowercase__ = convert_image_processor(_SCREAMING_SNAKE_CASE )
lowercase__ = preprocessor(images=prepare_img() , return_tensors='pt' )
# HF model inference
hf_model.eval()
with torch.no_grad():
lowercase__ = hf_model(**_SCREAMING_SNAKE_CASE )
lowercase__ = outputs.logits.detach().numpy()
# Original model inference
lowercase__ = False
lowercase__ = CONFIG_MAP[model_name]['image_size']
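    # Resize with nearest-neighbour resampling to the model's native resolution
    # before calling the original model's predict().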
lowercase__ = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
lowercase__ = image.img_to_array(_SCREAMING_SNAKE_CASE )
lowercase__ = np.expand_dims(_SCREAMING_SNAKE_CASE , axis=0 )
lowercase__ = original_model.predict(_SCREAMING_SNAKE_CASE )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ), "The predicted logits are not the same."
print('Model outputs match!' )
if save_model:
# Create folder to save model
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
os.mkdir(_SCREAMING_SNAKE_CASE )
# Save converted model and image processor
hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )
preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
lowercase__ = F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE )
hf_model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
lowercase_ = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 45
| 0
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
def __init__( self : Union[str, Any] , *a : Optional[Any] , a : int=None , a : Union[str, Any]=None , **a : List[str] )-> int:
"""simple docstring"""
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
lowercase__ = eval_examples
lowercase__ = post_process_function
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Union[str, Any]=None , a : Optional[Any]=None , a : Any=None , a : Dict = "eval" )-> Optional[Any]:
"""simple docstring"""
lowercase__ = self.eval_dataset if eval_dataset is None else eval_dataset
lowercase__ = self.get_eval_dataloader(UpperCamelCase__ )
lowercase__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowercase__ = self.compute_metrics
lowercase__ = None
lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
lowercase__ = time.time()
try:
lowercase__ = eval_loop(
UpperCamelCase__ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase__ , metric_key_prefix=UpperCamelCase__ , )
finally:
lowercase__ = compute_metrics
lowercase__ = self.args.eval_batch_size * self.args.world_size
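        # Exclude JIT/XLA compilation time from the measured wall time so that
        # speed_metrics reports steady-state evaluation throughput.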
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCamelCase__ , UpperCamelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowercase__ = self.post_process_function(UpperCamelCase__ , UpperCamelCase__ , output.predictions )
lowercase__ = self.compute_metrics(UpperCamelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
lowercase__ = metrics.pop(UpperCamelCase__ )
metrics.update(output.metrics )
else:
lowercase__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCamelCase__ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
        lowercase__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
return metrics
def SCREAMING_SNAKE_CASE_ ( self : str , a : Union[str, Any] , a : Any , a : List[Any]=None , a : Union[str, Any] = "test" )-> List[str]:
"""simple docstring"""
lowercase__ = self.get_test_dataloader(UpperCamelCase__ )
# Temporarily disable metric computation, we will do it in the loop here.
lowercase__ = self.compute_metrics
lowercase__ = None
lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
lowercase__ = time.time()
try:
lowercase__ = eval_loop(
UpperCamelCase__ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase__ , metric_key_prefix=UpperCamelCase__ , )
finally:
lowercase__ = compute_metrics
lowercase__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCamelCase__ , UpperCamelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowercase__ = self.post_process_function(UpperCamelCase__ , UpperCamelCase__ , output.predictions , 'predict' )
lowercase__ = self.compute_metrics(UpperCamelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
lowercase__ = metrics.pop(UpperCamelCase__ )
metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
| 721
|
import argparse
import json
import subprocess
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
lowercase__ = []
lowercase__ = (
F"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
' https://api.github.com/repos/huggingface/transformers/actions/runners'
)
lowercase__ = subprocess.run(_SCREAMING_SNAKE_CASE , shell=_SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE )
lowercase__ = output.stdout.decode('utf-8' )
lowercase__ = json.loads(_SCREAMING_SNAKE_CASE )
lowercase__ = status['runners']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open('offline_runners.txt' , 'w' ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        lowercase__ = '\n'.join([x['name'] for x in offline_runners] )
        raise ValueError(F"""The following runners are offline:\n{failed}""" )
if __name__ == "__main__":
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
return values.split(',' )
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
lowercase_ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 45
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE (snake_case_ ):
_UpperCamelCase : Optional[Any] = """transfo-xl"""
_UpperCamelCase : List[str] = ["""mems"""]
_UpperCamelCase : Optional[Any] = {
"""n_token""": """vocab_size""",
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Any , a : Tuple=267_735 , a : Optional[int]=[20_000, 40_000, 200_000] , a : Optional[Any]=1_024 , a : Union[str, Any]=1_024 , a : Any=16 , a : List[str]=64 , a : Tuple=4_096 , a : Union[str, Any]=4 , a : Union[str, Any]=False , a : List[Any]=18 , a : Optional[Any]=1_600 , a : Dict=1_000 , a : Union[str, Any]=True , a : str=True , a : List[str]=0 , a : Union[str, Any]=-1 , a : Optional[int]=True , a : Optional[int]=0.1 , a : List[Any]=0.0 , a : Tuple=True , a : Optional[Any]="normal" , a : int=0.01 , a : Union[str, Any]=0.01 , a : int=0.02 , a : List[Any]=1E-5 , a : Dict=0 , **a : Union[str, Any] , )-> Tuple:
"""simple docstring"""
lowercase__ = vocab_size
lowercase__ = []
self.cutoffs.extend(a )
if proj_share_all_but_first:
lowercase__ = [False] + [True] * len(self.cutoffs )
else:
lowercase__ = [False] + [False] * len(self.cutoffs )
lowercase__ = d_model
lowercase__ = d_embed
lowercase__ = d_head
lowercase__ = d_inner
lowercase__ = div_val
lowercase__ = pre_lnorm
lowercase__ = n_layer
lowercase__ = n_head
lowercase__ = mem_len
lowercase__ = same_length
lowercase__ = attn_type
lowercase__ = clamp_len
lowercase__ = sample_softmax
lowercase__ = adaptive
lowercase__ = dropout
lowercase__ = dropatt
lowercase__ = untie_r
lowercase__ = init
lowercase__ = init_range
lowercase__ = proj_init_std
lowercase__ = init_std
lowercase__ = layer_norm_epsilon
super().__init__(eos_token_id=a , **a )
    @property
    def max_position_embeddings( self : str )-> Any:
        """simple docstring"""
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self : List[str] , a : Optional[Any] )-> Optional[int]:
"""simple docstring"""
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 700
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Tuple = 'ClapFeatureExtractor'
_UpperCamelCase : Union[str, Any] = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self : List[Any] , a : int , a : str )-> Any:
"""simple docstring"""
super().__init__(a , a )
def __call__( self : Any , a : Tuple=None , a : Optional[int]=None , a : int=None , **a : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = kwargs.pop('sampling_rate' , a )
if text is None and audios is None:
raise ValueError('You have to specify either text or audios. Both cannot be none.' )
if text is not None:
lowercase__ = self.tokenizer(a , return_tensors=a , **a )
if audios is not None:
lowercase__ = self.feature_extractor(
a , sampling_rate=a , return_tensors=a , **a )
if text is not None and audios is not None:
lowercase__ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
def SCREAMING_SNAKE_CASE_ ( self : str , *a : Dict , **a : int )-> Optional[int]:
"""simple docstring"""
return self.tokenizer.batch_decode(*a , **a )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , *a : int , **a : Dict )-> Dict:
"""simple docstring"""
return self.tokenizer.decode(*a , **a )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.tokenizer.model_input_names
lowercase__ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 45
| 0
|
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
lowercase_ = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
lowercase_ = {
"""facebook/blenderbot_small-90M""": 512,
}
class SCREAMING_SNAKE_CASE (_UpperCAmelCase ):
_UpperCamelCase : int = VOCAB_FILES_NAMES
_UpperCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : str = BlenderbotSmallTokenizer
def __init__( self : str , a : List[Any]=None , a : Dict=None , a : Optional[int]="<|endoftext|>" , a : int="<|endoftext|>" , a : Any="<|endoftext|>" , a : Dict=False , a : Union[str, Any]=True , **a : List[Any] , )-> Any:
"""simple docstring"""
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=a , merges=a , add_prefix_space=a , trim_offsets=a , ) , bos_token=a , eos_token=a , unk_token=a , **a , )
lowercase__ = add_prefix_space
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : List[str] , a : int=None )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : str , a : Dict = None )-> Optional[Any]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 701
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
lowercase_ = None
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
lowercase_ = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
lowercase_ = {
"""moussaKam/mbarthez""": 1_024,
"""moussaKam/barthez""": 1_024,
"""moussaKam/barthez-orangesum-title""": 1_024,
}
lowercase_ = """▁"""
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Dict = VOCAB_FILES_NAMES
_UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[Any] = ['input_ids', 'attention_mask']
_UpperCamelCase : int = BarthezTokenizer
def __init__( self : List[Any] , a : Union[str, Any]=None , a : Optional[Any]=None , a : Dict="<s>" , a : Union[str, Any]="</s>" , a : List[str]="</s>" , a : Optional[Any]="<s>" , a : int="<unk>" , a : str="<pad>" , a : Optional[int]="<mask>" , **a : Union[str, Any] , )-> Tuple:
"""simple docstring"""
        lowercase__ = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
super().__init__(
a , tokenizer_file=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , **a , )
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : str , a : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
copyfile(self.vocab_file , a )
return (out_vocab_file,)
| 45
| 0
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
lowercase_ = {
"""vocab_file""": {
"""allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"""
},
"""merges_file""": {
"""allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"""
},
}
lowercase_ = {"""allegro/herbert-base-cased""": 514}
lowercase_ = {}
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Dict = VOCAB_FILES_NAMES
_UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[int] = HerbertTokenizer
def __init__( self : int , a : Dict=None , a : str=None , a : str=None , a : List[Any]="<s>" , a : Union[str, Any]="<unk>" , a : Union[str, Any]="<pad>" , a : Union[str, Any]="<mask>" , a : List[str]="</s>" , **a : List[Any] , )-> Tuple:
"""simple docstring"""
super().__init__(
a , a , tokenizer_file=a , cls_token=a , unk_token=a , pad_token=a , mask_token=a , sep_token=a , **a , )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : List[int] , a : Optional[List[int]] = None )-> Tuple:
"""simple docstring"""
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None , a : bool = False )-> Dict:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
if token_ids_a is None:
return [1] + ([0] * len(a )) + [1]
return [1] + ([0] * len(a )) + [1] + ([0] * len(a )) + [1]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : List[int] , a : Optional[List[int]] = None )-> str:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : str , a : Optional[str] = None )-> List[Any]:
"""simple docstring"""
lowercase__ = self._tokenizer.model.save(a , name=a )
return tuple(a )
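# A small sketch of the token_type_ids layout produced above: zeros cover
# <s> + first segment + </s>, ones cover second segment + </s>; the segment
# lengths 2 and 1 are arbitrary example values.
first_len, second_len = 2, 1
single_ids = [0] * (1 + first_len + 1)
pair_ids = [0] * (1 + first_len + 1) + [1] * (second_len + 1)
assert single_ids == [0, 0, 0, 0] and pair_ids == [0, 0, 0, 0, 1, 1]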
| 702
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : List[Any] = StableDiffusionSAGPipeline
_UpperCamelCase : str = TEXT_TO_IMAGE_PARAMS
_UpperCamelCase : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
lowercase__ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
lowercase__ = CLIPTextModel(a )
lowercase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowercase__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[int] , a : Any=0 )-> Union[str, Any]:
"""simple docstring"""
if str(a ).startswith('mps' ):
lowercase__ = torch.manual_seed(a )
else:
lowercase__ = torch.Generator(device=a ).manual_seed(a )
lowercase__ = {
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : str )-> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
lowercase__ = sag_pipe.to(a )
sag_pipe.set_progress_bar_config(disable=a )
lowercase__ = '.'
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowercase__ = sag_pipe.to(a )
sag_pipe.set_progress_bar_config(disable=a )
lowercase__ = '.'
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowercase__ = sag_pipe.to(a )
sag_pipe.set_progress_bar_config(disable=a )
lowercase__ = '.'
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , width=768 , height=512 , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
lowercase__ = output.images
assert image.shape == (1, 512, 768, 3)
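# A minimal text-to-image sketch mirroring the slow tests above; it assumes the
# checkpoint download and a CUDA device, so it is left commented out here.
# from diffusers import StableDiffusionSAGPipeline
# pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base').to('cuda')
# image = pipe('a placeholder prompt', guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20).images[0]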
| 45
| 0
|
def __UpperCamelCase (input_str , use_pascal = False ) -> str:
    if not isinstance(input_str , str ):
        msg = f"""Expected string as input, found {type(input_str )}"""
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = f"""Expected boolean as use_pascal parameter, found {type(use_pascal )}"""
        raise ValueError(msg )
    words = input_str.split('_' )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = '' if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
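    # Example conversions (the mangled function name is kept as defined above):
    assert __UpperCamelCase('some_text') == 'someText'
    assert __UpperCamelCase('some_text', use_pascal=True) == 'SomeText'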
| 703
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""facebook/deit-base-distilled-patch16-224""": (
"""https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"""
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Any = 'deit'
def __init__( self : Any , a : Union[str, Any]=768 , a : Optional[Any]=12 , a : Union[str, Any]=12 , a : Optional[int]=3_072 , a : Optional[int]="gelu" , a : Optional[Any]=0.0 , a : List[Any]=0.0 , a : int=0.02 , a : List[str]=1E-1_2 , a : Optional[int]=224 , a : Tuple=16 , a : List[Any]=3 , a : List[str]=True , a : Any=16 , **a : Union[str, Any] , )-> int:
"""simple docstring"""
super().__init__(**a )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = qkv_bias
lowercase__ = encoder_stride
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : List[Any] = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE_ ( self : int )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE_ ( self : Any )-> float:
"""simple docstring"""
return 1E-4
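# Worked check of the patch grid implied by the defaults above: image_size=224
# with patch_size=16 gives a 14x14 grid, i.e. 196 patches per image.
image_size, patch_size = 224, 16
assert (image_size // patch_size) ** 2 == 196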
| 45
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Optional[int]:
"""simple docstring"""
lowercase__ = tempfile.mkdtemp()
# fmt: off
lowercase__ = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowercase__ = dict(zip(_A , range(len(_A ) ) ) )
lowercase__ = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
lowercase__ = {'unk_token': '<unk>'}
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_A ) )
lowercase__ = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48145466, 0.4578275, 0.40821073],
'image_std': [0.26862954, 0.26130258, 0.27577711],
}
lowercase__ = os.path.join(self.tmpdirname , _A )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_A , _A )
def SCREAMING_SNAKE_CASE_ ( self : str , **a : Union[str, Any] )-> Any:
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_A )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , **a : int )-> Optional[int]:
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_A )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , **a : int )-> Dict:
"""simple docstring"""
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_A )
def SCREAMING_SNAKE_CASE_ ( self : int )-> Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[str]:
"""simple docstring"""
lowercase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase__ = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE_ ( self : int )-> str:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = CLIPProcessor(tokenizer=_A , image_processor=_A )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_A )
lowercase__ = CLIPProcessor(tokenizer=_A , image_processor=_A )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _A )
self.assertIsInstance(processor_fast.tokenizer , _A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _A )
self.assertIsInstance(processor_fast.image_processor , _A )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> int:
"""simple docstring"""
lowercase__ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowercase__ = self.get_image_processor(do_normalize=_A , padding_value=1.0 )
lowercase__ = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = CLIPProcessor(tokenizer=_A , image_processor=_A )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(_A , return_tensors='np' )
lowercase__ = processor(images=_A , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def SCREAMING_SNAKE_CASE_ ( self : int )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = CLIPProcessor(tokenizer=_A , image_processor=_A )
lowercase__ = 'lower newer'
lowercase__ = processor(text=_A )
lowercase__ = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE_ ( self : str )-> Any:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = CLIPProcessor(tokenizer=_A , image_processor=_A )
lowercase__ = 'lower newer'
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = CLIPProcessor(tokenizer=_A , image_processor=_A )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.batch_decode(_A )
lowercase__ = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> int:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = CLIPProcessor(tokenizer=_A , image_processor=_A )
lowercase__ = 'lower newer'
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 704
|
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token , num_runs=7 ):
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""}
    # The id of a workflow (not of a workflow run)
    workflow_id = '636036'
    url = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
    result = requests.get(url , headers=headers ).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token ):
    workflow_runs = get_daily_ci_runs(token )
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run['id']
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names , output_dir , token ):
    workflow_run_id = get_last_daily_ci_runs(token )
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id , token=token )
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name , artifact_url=artifact_url , output_dir=output_dir , token=token )
def get_last_daily_ci_reports(artifact_names , output_dir , token ):
    get_last_daily_ci_artifacts(artifact_names , output_dir , token )
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir , f"""{artifact_name}.zip""" )
        if os.path.isfile(artifact_zip_path ):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        with z.open(filename ) as f:
                            results[artifact_name][filename] = f.read().decode('UTF-8' )
return results
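# Hypothetical invocation sketch for the helpers above; the artifact name, output
# directory and token below are placeholders, not values from this repository.
# results = get_last_daily_ci_reports(['ci_results'], 'outputs', '<GITHUB_TOKEN>')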
| 45
| 0
|
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass (KwargsHandler ):
    a : int = 0
    b : bool = False
    c : float = 3.0
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[str]:
"""simple docstring"""
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
        self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Any:
"""simple docstring"""
        scaler_handler = GradScalerKwargs(init_scale=1_024 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_000 )
        self.assertEqual(scaler._enabled , True )
@require_multi_gpu
def SCREAMING_SNAKE_CASE_ ( self : Any )-> int:
"""simple docstring"""
        cmd = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1_024 * 1_024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 705
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowercase_ = False
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe.dual_guided(
prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a )
lowercase__ = VersatileDiffusionPipeline.from_pretrained(a , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = generator.manual_seed(0 )
lowercase__ = pipe.dual_guided(
prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[str]:
"""simple docstring"""
lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = 'cyberpunk 2077'
lowercase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe.dual_guided(
prompt=a , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowercase__ = 'A painting of a squirrel eating a burger '
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe.text_to_image(
prompt=a , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowercase__ = pipe.image_variation(a , generator=a , output_type='numpy' ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 45
| 0
|
from math import sqrt
def is_prime(number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001 ) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number ):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number ):
            count += 1
return number
if __name__ == "__main__":
print(f'''{solution() = }''')
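    # Sanity checks for the helpers above (names restored from the call sites;
    # the 6th prime is 13):
    assert [n for n in range(2, 14) if is_prime(n)] == [2, 3, 5, 7, 11, 13]
    assert solution(6) == 13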
| 706
|
def is_arithmetic_series(series: list ) -> bool:
    if not isinstance(series , list ):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
    if len(series ) == 0:
        raise ValueError('Input list must be a non empty list' )
    if len(series ) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series ) - 1 ):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series: list ) -> float:
    if not isinstance(series , list ):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
    if len(series ) == 0:
        raise ValueError('Input list must be a non empty list' )
    answer = 0
    for val in series:
        answer += val
    return answer / len(series )
if __name__ == "__main__":
import doctest
doctest.testmod()
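    # Example inputs for the two helpers above (the function names are assumed,
    # since the originals were elided in this dump):
    assert is_arithmetic_series([2, 4, 6]) is True
    assert is_arithmetic_series([2, 4, 7]) is False
    assert arithmetic_mean([2, 4, 6]) == 4.0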
| 45
| 0
|
import torch
from diffusers import StableDiffusionPipeline
lowercase_ = """path-to-your-trained-model"""
lowercase_ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")
lowercase_ = """A photo of sks dog in a bucket"""
lowercase_ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
| 707
|
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[int | float], int | float] , x_start: int | float , x_end: int | float , steps: int = 100 , ) -> float:
    x1 = x_start
    fx1 = fnc(x_start )
    length = 0.0
    for _ in range(steps ):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2 )
        length += math.hypot(x2 - x1 , fx2 - fx1 )
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":
    def f(x ) -> float:
        return math.sin(10 * x )
print("""f(x) = sin(10 * x)""")
print("""The length of the curve from x = -10 to x = 10 is:""")
lowercase_ = 10
while i <= 100_000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
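    # Sanity check: the straight line y = x from 0 to 1 has length sqrt(2).
    assert abs(line_length(lambda x: x, 0, 1, 1_000) - math.sqrt(2)) < 1e-9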
| 45
| 0
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE (lowerCAmelCase__ ):
_UpperCamelCase : Dict = ["image_processor", "tokenizer"]
_UpperCamelCase : List[str] = "AutoImageProcessor"
_UpperCamelCase : int = "AutoTokenizer"
def __init__( self : Optional[int] , a : Dict , a : Tuple )-> int:
"""simple docstring"""
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase__ = self.image_processor
def __call__( self : int , a : List[Any]=None , a : Union[str, Any]=None , a : List[Any]=None , **a : Union[str, Any] )-> Tuple:
"""simple docstring"""
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
lowercase__ = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if images is not None:
lowercase__ = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is not None and images is not None:
lowercase__ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_SCREAMING_SNAKE_CASE ) , tensor_type=_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self : Any , *a : str , **a : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self : str , *a : Any , **a : Any )-> Optional[int]:
"""simple docstring"""
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def SCREAMING_SNAKE_CASE_ ( self : str )-> Union[str, Any]:
"""simple docstring"""
return ["input_ids", "attention_mask", "pixel_values"]
| 708
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 45
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE (_A ):
_UpperCamelCase : Union[str, Any] = ['pixel_values']
def __init__( self : str , a : bool = True , a : Optional[Dict[str, int]] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 255 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : List[Any] , )-> None:
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
lowercase__ = size if size is not None else {'shortest_edge': 256}
lowercase__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
lowercase__ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
lowercase__ = get_size_dict(UpperCamelCase__ )
lowercase__ = do_resize
lowercase__ = size
lowercase__ = resample
lowercase__ = do_center_crop
lowercase__ = crop_size
lowercase__ = do_rescale
lowercase__ = rescale_factor
lowercase__ = do_normalize
lowercase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE_ ( self : Any , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : Tuple , )-> np.ndarray:
"""simple docstring"""
lowercase__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
lowercase__ = get_resize_output_image_size(UpperCamelCase__ , size=size['shortest_edge'] , default_to_square=UpperCamelCase__ )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : str , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : str , )-> np.ndarray:
"""simple docstring"""
lowercase__ = get_size_dict(UpperCamelCase__ )
return center_crop(UpperCamelCase__ , size=(size['height'], size['width']) , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : np.ndarray , a : float , a : Optional[Union[str, ChannelDimension]] = None , **a : Any )-> np.ndarray:
"""simple docstring"""
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : str , )-> np.ndarray:
"""simple docstring"""
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : ImageInput , a : Optional[bool] = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : Optional[bool] = None , a : Optional[float] = None , a : Optional[bool] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a : int , )-> Dict:
"""simple docstring"""
lowercase__ = do_resize if do_resize is not None else self.do_resize
lowercase__ = size if size is not None else self.size
lowercase__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
lowercase__ = resample if resample is not None else self.resample
lowercase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__ = crop_size if crop_size is not None else self.crop_size
lowercase__ = get_size_dict(UpperCamelCase__ )
lowercase__ = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ = image_mean if image_mean is not None else self.image_mean
lowercase__ = image_std if image_std is not None else self.image_std
lowercase__ = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowercase__ = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
lowercase__ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_center_crop:
lowercase__ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
if do_rescale:
lowercase__ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
lowercase__ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
lowercase__ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
lowercase__ = {'pixel_values': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
| 709
|
def solution(length: int = 50 ) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
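    # Cross-check against the worked example from Project Euler 116: a row of
    # length 5 admits 7 + 3 + 2 = 12 tilings.
    assert solution(5) == 12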
| 45
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['''MobileBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 710
|
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
def __init__( self : Optional[Any] , a : UNetaDModel , a : UNetaDModel , a : DDPMScheduler , a : Any , )-> Dict:
"""simple docstring"""
super().__init__()
lowercase__ = value_function
lowercase__ = unet
lowercase__ = scheduler
lowercase__ = env
lowercase__ = env.get_dataset()
lowercase__ = {}
for key in self.data.keys():
try:
lowercase__ = self.data[key].mean()
except: # noqa: E722
pass
lowercase__ = {}
for key in self.data.keys():
try:
lowercase__ = self.data[key].std()
except: # noqa: E722
pass
lowercase__ = env.observation_space.shape[0]
lowercase__ = env.action_space.shape[0]
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Any , a : int )-> Dict:
"""simple docstring"""
return (x_in - self.means[key]) / self.stds[key]
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str , a : List[str] )-> str:
"""simple docstring"""
return x_in * self.stds[key] + self.means[key]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : Tuple )-> Tuple:
"""simple docstring"""
if type(a ) is dict:
return {k: self.to_torch(a ) for k, v in x_in.items()}
elif torch.is_tensor(a ):
return x_in.to(self.unet.device )
return torch.tensor(a , device=self.unet.device )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Optional[int] , a : Dict , a : Optional[Any] )-> List[Any]:
"""simple docstring"""
for key, val in cond.items():
lowercase__ = val.clone()
return x_in
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[Any] , a : Any , a : Optional[Any] , a : Optional[int] )-> List[Any]:
"""simple docstring"""
lowercase__ = x.shape[0]
lowercase__ = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowercase__ = torch.full((batch_size,) , a , device=self.unet.device , dtype=torch.long )
for _ in range(a ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowercase__ = self.value_function(x.permute(0 , 2 , 1 ) , a ).sample
lowercase__ = torch.autograd.grad([y.sum()] , [x] )[0]
lowercase__ = self.scheduler._get_variance(a )
lowercase__ = torch.exp(0.5 * posterior_variance )
lowercase__ = model_std * grad
lowercase__ = 0
lowercase__ = x.detach()
lowercase__ = x + scale * grad
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.unet(x.permute(0 , 2 , 1 ) , a ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
lowercase__ = self.scheduler.step(a , a , a , predict_epsilon=a )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.to_torch(a )
return x, y
def __call__( self : Any , a : Tuple , a : int=64 , a : Tuple=32 , a : List[Any]=2 , a : List[str]=0.1 )-> List[Any]:
"""simple docstring"""
lowercase__ = self.normalize(a , 'observations' )
lowercase__ = obs[None].repeat(a , axis=0 )
lowercase__ = {0: self.to_torch(a )}
lowercase__ = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowercase__ = randn_tensor(a , device=self.unet.device )
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.to_torch(a )
# run the diffusion process
lowercase__ , lowercase__ = self.run_diffusion(a , a , a , a )
# sort output trajectories by value
lowercase__ = y.argsort(0 , descending=a ).squeeze()
lowercase__ = x[sorted_idx]
lowercase__ = sorted_values[:, :, : self.action_dim]
lowercase__ = actions.detach().cpu().numpy()
lowercase__ = self.de_normalize(a , key='actions' )
# select the action with the highest value
if y is not None:
lowercase__ = 0
else:
# if we didn't run value guiding, select a random action
lowercase__ = np.random.randint(0 , a )
lowercase__ = denorm_actions[selected_index, 0]
return denorm_actions
| 45
| 0
|
import requests
def send_slack_message(message_body: str , slack_url: str ) -> None:
    headers = {'Content-Type': 'application/json'}
    response = requests.post(slack_url , json={'text': message_body} , headers=headers )
    if response.status_code != 200:
        msg = (
            'Request to slack returned an error '
            f"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(msg )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 711
|
from PIL import Image
def change_brightness(img: Image , level: float ) -> Image:
    def brightness(c: int ) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
    return img.point(brightness )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
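    # A minimal in-memory check of the point transform above (assumes Pillow is
    # installed): with level = 100, a mid-grey pixel 128 maps to 128 + 100 = 228.
    test_img = Image.new('L', (2, 2), color=128)
    assert list(change_brightness(test_img, 100).getdata()) == [228] * 4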
| 45
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase_ = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 712
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def __init__( self : Any , a : str , a : List[Any]=7 , a : int=3 , a : int=18 , a : Optional[Any]=30 , a : Optional[int]=400 , a : int=True , a : Tuple=None , a : Optional[Any]=True , a : str=False , a : str=True , a : int=True , a : Tuple=[0.5, 0.5, 0.5] , a : Any=[0.5, 0.5, 0.5] , )-> Optional[int]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size if size is not None else {'height': 18, 'width': 20}
lowercase__ = do_thumbnail
lowercase__ = do_align_axis
lowercase__ = do_pad
lowercase__ = do_normalize
lowercase__ = image_mean
lowercase__ = image_std
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = DonutImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : int )-> List[Any]:
"""simple docstring"""
lowercase__ = DonutImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Any )-> int:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_thumbnail' ) )
self.assertTrue(hasattr(a , 'do_align_long_axis' ) )
self.assertTrue(hasattr(a , 'do_pad' ) )
self.assertTrue(hasattr(a , 'do_normalize' ) )
self.assertTrue(hasattr(a , 'image_mean' ) )
self.assertTrue(hasattr(a , 'image_std' ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
"""simple docstring"""
pass
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 45
| 0
|
def search(list_data: list , key: int , left: int = 0 , right: int = 0 ) -> int:
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data , key , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
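    # Example searches with the restored signature:
    data = [1, 3, 5, 7, 9]
    assert search(data, 7) == 3
    assert search(data, 4) == -1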
| 713
|
import math
def res(x , y ):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x )
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
        raise AssertionError('This should never happen' )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
lowercase_ = """Enter the base and the power separated by a comma: """
lowercase_ , lowercase_ = map(int, input(prompt).split(""","""))
lowercase_ , lowercase_ = map(int, input(prompt).split(""","""))
# We find the log of each number, using the function res(), which takes two
# arguments.
lowercase_ = res(xa, ya)
lowercase_ = res(xa, ya)
# We check for the largest number
if resa > resa:
print("""Largest number is""", xa, """^""", ya)
elif resa > resa:
print("""Largest number is""", xa, """^""", ya)
else:
print("""Both are equal""")
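    # Worked comparison without stdin: 2**10 = 1024 beats 10**3 = 1000, and the
    # base-10 log comparison above agrees.
    assert res(2, 10) > res(10, 3)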
| 45
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
"""xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
"""xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
"""xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
"""xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
"""xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
"""xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
"""xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE (_UpperCamelCase ):
_UpperCamelCase : Union[str, Any] = 'xlm'
_UpperCamelCase : Optional[int] = {
'hidden_size': 'emb_dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
'n_words': 'vocab_size', # For backward compatibility
}
def __init__( self : str , a : Optional[int]=30_145 , a : Union[str, Any]=2_048 , a : List[Any]=12 , a : Optional[Any]=16 , a : Optional[int]=0.1 , a : str=0.1 , a : List[Any]=True , a : Optional[int]=False , a : Any=False , a : List[str]=False , a : List[Any]=1 , a : List[Any]=True , a : Any=512 , a : Tuple=2_048**-0.5 , a : Dict=1E-1_2 , a : Any=0.02 , a : List[str]=0 , a : Tuple=1 , a : Optional[int]=2 , a : Union[str, Any]=3 , a : Optional[Any]=5 , a : List[str]=True , a : str="first" , a : Optional[int]=True , a : Any=None , a : Tuple=True , a : Tuple=0.1 , a : List[str]=5 , a : List[str]=5 , a : Dict=0 , a : Tuple=0 , a : int=2 , a : Dict=0 , **a : Dict , )-> Tuple:
"""simple docstring"""
lowercase__ = vocab_size
lowercase__ = emb_dim
lowercase__ = n_layers
lowercase__ = n_heads
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = gelu_activation
lowercase__ = sinusoidal_embeddings
lowercase__ = causal
lowercase__ = asm
lowercase__ = n_langs
lowercase__ = use_lang_emb
lowercase__ = layer_norm_eps
lowercase__ = bos_index
lowercase__ = eos_index
lowercase__ = pad_index
lowercase__ = unk_index
lowercase__ = mask_index
lowercase__ = is_encoder
lowercase__ = max_position_embeddings
lowercase__ = embed_init_std
lowercase__ = init_std
lowercase__ = summary_type
lowercase__ = summary_use_proj
lowercase__ = summary_activation
lowercase__ = summary_proj_to_labels
lowercase__ = summary_first_dropout
lowercase__ = start_n_top
lowercase__ = end_n_top
lowercase__ = mask_token_id
lowercase__ = lang_id
if "n_words" in kwargs:
lowercase__ = kwargs["n_words"]
super().__init__(pad_token_id=a , bos_token_id=a , **a )
class SCREAMING_SNAKE_CASE (_UpperCamelCase ):
@property
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Dict:
"""simple docstring"""
if self.task == "multiple-choice":
lowercase__ = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase__ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 714
|
class SCREAMING_SNAKE_CASE :  # Public class to implement a graph
    def __init__( self , row: int , col: int , graph: list[list[bool]] ) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe( self , i: int , j: int , visited: list[list[bool]] ) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs( self , i: int , j: int , visited: list[list[bool]] ) -> None:
        # Checking all 8 cells surrounding (i, j)
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8 ):
            if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , visited ):
                self.diffs(i + row_nbr[k] , j + col_nbr[k] , visited )

    def count_islands( self ) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL )] for i in range(self.ROW )]
        count = 0
        for i in range(self.ROW ):
            for j in range(self.COL ):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i , j , visited )
                    count += 1
        return count
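# Example run on a small grid with two 8-connected islands; the method names
# is_safe/diffs follow the internal call sites, count_islands is assumed.
if __name__ == "__main__":
    example_grid = [
        [1, 1, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1],
    ]
    g = SCREAMING_SNAKE_CASE(3, 4, example_grid)
    assert g.count_islands() == 2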
| 45
| 0
|