from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the combined, sorted contents of two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
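# Added usage sketch (not part of the original script): the combined length
# decides whether we take the middle element or average the two middle ones.
def _median_examples() -> None:
    assert median_of_two_arrays([1, 3], [2]) == 2  # odd combined length
    assert median_of_two_arrays([1, 2], [3, 4]) == 2.5  # even combined length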
# ---------------------------------------------------------------------------
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    """Output of a decoding step, carrying the decoded sample."""

    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class VectorQuantizer(nn.Module):
    """Discrete codebook lookup with optional post-hoc remapping of indices."""

    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j: (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q


class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device and dtype as the parameters
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
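# Added usage sketch for DiagonalGaussianDistribution (illustrative; the
# 8-channel `parameters` tensor and the shapes are assumptions, not from the
# original file). An encoder's conv_out emits mean and logvar stacked on the
# channel axis, and torch.chunk(..., 2, dim=1) splits them apart.
def _diagonal_gaussian_demo() -> None:
    parameters = torch.randn(2, 8, 4, 4)  # 4 mean channels + 4 logvar channels
    posterior = DiagonalGaussianDistribution(parameters)
    latents = posterior.sample()  # reparameterized draw: mean + std * eps
    kl = posterior.kl()  # KL against a standard normal, one value per batch item
    assert latents.shape == (2, 4, 4, 4)
    assert kl.shape == (2,)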
# ---------------------------------------------------------------------------
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[str] = {"""vocab_file""": """vocab.json"""}
SCREAMING_SNAKE_CASE : Any = {
"""vocab_file""": {
"""mgp-str""": """https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json""",
}
}
SCREAMING_SNAKE_CASE : Any = {"""mgp-str""": 27}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self , a_ , a_="[GO]" , a_="[GO]" , a_="[s]" , a_="[GO]" , **a_ ):
'''simple docstring'''
super().__init__(
unk_token=a_ , bos_token=a_ , eos_token=a_ , pad_token=a_ , **a_ , )
with open(a_ , encoding='''utf-8''' ) as vocab_handle:
__snake_case : str = json.load(a_ )
__snake_case : str = {v: k for k, v in self.vocab.items()}
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return len(self.vocab )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Optional[Any] = []
for s in text:
char_tokens.extend(a_ )
return char_tokens
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return self.vocab.get(a_ , self.vocab.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return self.decoder.get(a_ )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if not os.path.isdir(a_ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(a_ ) )
return
__snake_case : Optional[Any] = os.path.join(
a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
with open(a_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=a_ , ensure_ascii=a_ ) + '''\n''' )
return (vocab_file,)
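# Added sketch (hypothetical vocab contents; not from the original file): the
# tokenizer is character-level, so _tokenize simply splits the string and each
# character is looked up in vocab.json.
def _mgpstr_tokenizer_demo() -> None:
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
        json.dump({"[GO]": 0, "a": 1, "b": 2, "c": 3}, f)
    tokenizer = MgpstrTokenizer(vocab_file=f.name)
    tokens = tokenizer._tokenize("abc")
    assert tokens == ["a", "b", "c"]
    assert [tokenizer._convert_token_to_id(t) for t in tokens] == [1, 2, 3]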
# ---------------------------------------------------------------------------
def is_even(number: int) -> bool:
    """Return True if the given number is even, using a bitwise check."""
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
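# Added check: `number & 1` isolates the least significant bit, which is 0
# exactly for even integers (including negatives in two's complement).
assert is_even(0) and is_even(-4) and not is_even(7)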
# ---------------------------------------------------------------------------
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
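# Added usage sketch (not part of the original module): the config mirrors a
# RoBERTa-style layout, and constructor keyword overrides land on attributes.
def _data2vec_config_demo() -> None:
    config = Data2VecTextConfig(hidden_size=256, num_hidden_layers=4)
    assert config.model_type == "data2vec-text"
    assert config.hidden_size == 256 and config.num_hidden_layers == 4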
# ---------------------------------------------------------------------------
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
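# Added aside, a minimal sketch of the mixed-precision pattern used above:
# under CPU autocast, eligible ops such as matmul run in bfloat16, which is
# what makes the ipex-optimized modules fast without a GPU. Illustrative only.
def _autocast_sketch() -> None:
    a = torch.randn(4, 4)
    b = torch.randn(4, 4)
    with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
        c = a @ b
    assert c.dtype == torch.bfloat16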
# ---------------------------------------------------------------------------
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
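# Added illustration of the idea behind `_LazyModule` (a simplified sketch, not
# the real implementation): the module object in sys.modules is swapped for one
# whose __getattr__ imports the defining submodule on first attribute access.
import importlib as _importlib
import types as _types


class _LazyModuleSketch(_types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        submodule = _importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)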
# ---------------------------------------------------------------------------
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the frequency response of a filter by passing an impulse through it."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the phase response of a filter by passing an impulse through it."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
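# Added example: any object with a `process(sample) -> float` method satisfies
# the FilterType protocol above. An identity ("all-pass") filter leaves the
# impulse untouched, so its frequency-response plot is a flat 0 dB line.
class AllPassFilter:
    def process(self, sample: float) -> float:
        return sample


if __name__ == "__main__":
    show_frequency_response(AllPassFilter(), 48000)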
# ---------------------------------------------------------------------------
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] , _lowercase : Tuple=False ) ->List[str]:
'''simple docstring'''
a : Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
a : int = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[int] , _lowercase : int , _lowercase : List[Any]=False ) ->int:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
a : List[Any] = ""
else:
a : int = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
a : Optional[int] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
a : Tuple = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
a : int = in_proj_weight[
: config.hidden_size, :
]
a : Optional[Any] = in_proj_bias[: config.hidden_size]
a : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
a : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
a : Dict = in_proj_weight[
-config.hidden_size :, :
]
a : Optional[int] = in_proj_bias[-config.hidden_size :]
def _SCREAMING_SNAKE_CASE ( _lowercase : Any ) ->Any:
'''simple docstring'''
a : Union[str, Any] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowercase , _lowercase )
def _SCREAMING_SNAKE_CASE ( _lowercase : Dict ) ->Tuple:
'''simple docstring'''
a : Optional[int] = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
state_dict.pop(_lowercase , _lowercase )
def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] , _lowercase : str , _lowercase : Any ) ->List[Any]:
'''simple docstring'''
a : Union[str, Any] = dct.pop(_lowercase )
a : Optional[Any] = val
def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : Dict ) ->List[str]:
'''simple docstring'''
a : Tuple = ViTMSNConfig()
a : List[Any] = 1000
a : str = "datasets/huggingface/label-files"
a : Optional[Any] = "imagenet-1k-id2label.json"
a : str = json.load(open(hf_hub_download(_lowercase , _lowercase ) , "r" ) )
a : Optional[Any] = {int(_lowercase ): v for k, v in idalabel.items()}
a : Dict = idalabel
a : Union[str, Any] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
a : Any = 384
a : Any = 1536
a : Optional[int] = 6
elif "l16" in checkpoint_url:
a : str = 1024
a : Any = 4096
a : str = 24
a : List[str] = 16
a : Any = 0.1
elif "b4" in checkpoint_url:
a : Optional[Any] = 4
elif "l7" in checkpoint_url:
a : Dict = 7
a : int = 1024
a : List[str] = 4096
a : Optional[int] = 24
a : int = 16
a : List[Any] = 0.1
a : List[str] = ViTMSNModel(_lowercase )
a : Union[str, Any] = torch.hub.load_state_dict_from_url(_lowercase , map_location="cpu" )["target_encoder"]
a : List[str] = ViTImageProcessor(size=config.image_size )
remove_projection_head(_lowercase )
a : int = create_rename_keys(_lowercase , base_model=_lowercase )
for src, dest in rename_keys:
rename_key(_lowercase , _lowercase , _lowercase )
read_in_q_k_v(_lowercase , _lowercase , base_model=_lowercase )
model.load_state_dict(_lowercase )
model.eval()
a : str = "http://images.cocodataset.org/val2017/000000039769.jpg"
a : Union[str, Any] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
a : List[Any] = ViTImageProcessor(
size=config.image_size , image_mean=_lowercase , image_std=_lowercase )
a : List[Any] = image_processor(images=_lowercase , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
a : List[Any] = model(**_lowercase )
a : Optional[Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
a : Dict = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
a : Union[str, Any] = torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
a : Any = torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
a : Optional[Any] = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
a : Tuple = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , _lowercase , atol=1E-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
a : Optional[Any] = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
# ---------------------------------------------------------------------------
import tensorflow as tf

from ...tf_utils import shape_list


class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)

                weight = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(weight)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)

                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)

                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
# ---------------------------------------------------------------------------
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__UpperCamelCase : Optional[Any] = '''scheduler_config.json'''
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = 1
lowercase__ = 2
lowercase__ = 3
lowercase__ = 4
lowercase__ = 5
@dataclass
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = 42
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase__ = SCHEDULER_CONFIG_NAME
lowercase__ = ["dtype"]
lowercase__ = []
lowercase__ = True
@classmethod
def __lowerCAmelCase ( cls : List[Any] ,lowercase_ : Dict[str, Any] = None ,lowercase_ : Optional[str] = None ,lowercase_ : Optional[int]=False ,**lowercase_ : Any ,):
lowerCAmelCase__ ,lowerCAmelCase__ : Dict = cls.load_config(
pretrained_model_name_or_path=lowercase_ ,subfolder=lowercase_ ,return_unused_kwargs=lowercase_ ,**lowercase_ ,)
lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = cls.from_config(lowercase_ ,return_unused_kwargs=lowercase_ ,**lowercase_ )
if hasattr(lowercase_ ,'''create_state''' ) and getattr(lowercase_ ,'''has_state''' ,lowercase_ ):
lowerCAmelCase__ : List[Any] = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def __lowerCAmelCase ( self : Tuple ,lowercase_ : Union[str, os.PathLike] ,lowercase_ : bool = False ,**lowercase_ : str ):
self.save_config(save_directory=lowercase_ ,push_to_hub=lowercase_ ,**lowercase_ )
@property
def __lowerCAmelCase ( self : List[str] ):
return self._get_compatibles()
@classmethod
def __lowerCAmelCase ( cls : List[Any] ):
lowerCAmelCase__ : Tuple = list(set([cls.__name__] + cls._compatibles ) )
lowerCAmelCase__ : Tuple = importlib.import_module(__name__.split('''.''' )[0] )
lowerCAmelCase__ : Union[str, Any] = [
getattr(lowercase_ ,lowercase_ ) for c in compatible_classes_str if hasattr(lowercase_ ,lowercase_ )
]
return compatible_classes
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
assert len(A_ ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(A_ ) - x.ndim) ) , A_ )
def __SCREAMING_SNAKE_CASE ( A_ , A_=0.999 , A_=jnp.floataa ):
def alpha_bar(A_ ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
lowerCAmelCase__ : Optional[Any] = []
for i in range(A_ ):
lowerCAmelCase__ : str = i / num_diffusion_timesteps
lowerCAmelCase__ : Union[str, Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(A_ ) / alpha_bar(A_ ) , A_ ) )
return jnp.array(A_ , dtype=A_ )
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] ,lowercase_ : List[Any] ):
lowerCAmelCase__ : Optional[int] = scheduler.config
if config.trained_betas is not None:
lowerCAmelCase__ : Any = jnp.asarray(config.trained_betas ,dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
lowerCAmelCase__ : Union[str, Any] = jnp.linspace(config.beta_start ,config.beta_end ,config.num_train_timesteps ,dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase__ : int = (
jnp.linspace(
config.beta_start**0.5 ,config.beta_end**0.5 ,config.num_train_timesteps ,dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase__ : List[Any] = betas_for_alpha_bar(config.num_train_timesteps ,dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
lowerCAmelCase__ : str = 1.0 - betas
lowerCAmelCase__ : Union[str, Any] = jnp.cumprod(lowercase_ ,axis=0 )
return cls(
alphas=lowercase_ ,betas=lowercase_ ,alphas_cumprod=lowercase_ ,)
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ):
lowerCAmelCase__ : Any = state.alphas_cumprod
lowerCAmelCase__ : Optional[Any] = alphas_cumprod[timesteps] ** 0.5
lowerCAmelCase__ : Tuple = sqrt_alpha_prod.flatten()
lowerCAmelCase__ : str = broadcast_to_shape_from_left(A_ , original_samples.shape )
lowerCAmelCase__ : Optional[Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
lowerCAmelCase__ : Optional[Any] = sqrt_one_minus_alpha_prod.flatten()
lowerCAmelCase__ : Optional[int] = broadcast_to_shape_from_left(A_ , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ):
lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = get_sqrt_alpha_prod(A_ , A_ , A_ , A_ )
lowerCAmelCase__ : str = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ):
lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = get_sqrt_alpha_prod(A_ , A_ , A_ , A_ )
lowerCAmelCase__ : Union[str, Any] = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
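# Added quick checks (not part of the original module): the cosine schedule
# yields one beta per timestep, and the broadcast helper right-pads singleton
# axes so per-timestep scalars broadcast against sample tensors.
if __name__ == "__main__":
    demo_betas = betas_for_alpha_bar(10)
    assert demo_betas.shape == (10,)
    demo = broadcast_to_shape_from_left(jnp.ones(4), (4, 3, 2))
    assert demo.shape == (4, 3, 2)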
# ---------------------------------------------------------------------------
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting characters in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]
) -> list[str]:
    """Select a second parent and generate new children."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        raise ValueError(f"{N_POPULATION} must be bigger than {N_SELECTED}")
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        raise ValueError(f"{not_in_genes_list} is not in genes list, evolution cannot converge")

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
# ---------------------------------------------------------------------------
def merge_sort(collection: list) -> list:
    """Pure implementation of the merge sort algorithm in Python."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into one sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
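# Added examples (not in the original file): merge sort is O(n log n) for any
# input ordering; the generator-based merge consumes both halves in one pass.
def _merge_sort_examples() -> None:
    assert merge_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
    assert merge_sort([]) == []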
# ---------------------------------------------------------------------------
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ---------------------------------------------------------------------------
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def a__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
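# Added usage sketch: exactly one of the three quantities must be 0, and the
# function solves for it from P = V * I.
def _electric_power_examples() -> None:
    assert electric_power(voltage=0, current=2, power=5).value == 2.5
    assert electric_power(voltage=2, current=2, power=0).value == 4.0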
# ---------------------------------------------------------------------------
def count_divisors(n: int) -> int:
    """Count the divisors of n via its prime factorization."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Return the first triangle number with more than 500 divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
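# Added check: for n = p1^a1 * p2^a2 * ..., the divisor count is the product of
# (ai + 1); e.g. 28 = 2^2 * 7 has (2 + 1) * (1 + 1) = 6 divisors.
assert count_divisors(28) == 6
assert count_divisors(1) == 1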
# ---------------------------------------------------------------------------
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def _snake_case ( UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int ):
# prepare kernel
# the kernel size have to be odd
if (ksize % 2) == 0:
UpperCAmelCase : int = ksize + 1
UpperCAmelCase : Any = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(UpperCamelCase ):
for x in range(UpperCamelCase ):
# distance from center
UpperCAmelCase : int = x - ksize // 2
UpperCAmelCase : Optional[int] = y - ksize // 2
# degree to radiant
UpperCAmelCase : List[str] = theta / 180 * np.pi
UpperCAmelCase : str = np.cos(_theta )
UpperCAmelCase : Optional[Any] = np.sin(_theta )
# get kernel x
UpperCAmelCase : List[Any] = cos_theta * px + sin_theta * py
# get kernel y
UpperCAmelCase : Any = -sin_theta * px + cos_theta * py
# fill kernel
UpperCAmelCase : Any = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
A: Any = imread("../image_data/lena.jpg")
# turn image in gray scale value
A: Optional[int] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
A: List[str] = np.zeros(gray.shape[:2])
for theta in [0, 3_0, 6_0, 9_0, 1_2_0, 1_5_0]:
A: List[str] = gabor_filter_kernel(1_0, 8, theta, 1_0, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
A: str = out / out.max() * 2_5_5
A: List[Any] = out.astype(np.uinta)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
# ---------------------------------------------------------------------------
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __UpperCAmelCase ( _lowerCamelCase ):
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_snake_case = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_snake_case = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_snake_case = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(lowerCAmelCase_ )
BertModel.from_pretrained(lowerCAmelCase_ )
BertTokenizer.from_pretrained(lowerCAmelCase_ )
pipeline(task='fill-mask' , model=lowerCAmelCase_ )
# baseline - just load from_pretrained with normal network
_snake_case = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_snake_case = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_snake_case = '1'
_snake_case = subprocess.run(lowerCAmelCase_ , env=lowerCAmelCase_ , check=lowerCAmelCase_ , capture_output=lowerCAmelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_snake_case = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_snake_case = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_snake_case = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(lowerCAmelCase_ )
BertModel.from_pretrained(lowerCAmelCase_ )
BertTokenizer.from_pretrained(lowerCAmelCase_ )
pipeline(task='fill-mask' , model=lowerCAmelCase_ )
# baseline - just load from_pretrained with normal network
_snake_case = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_snake_case = self.get_env()
_snake_case = subprocess.run(lowerCAmelCase_ , env=lowerCAmelCase_ , check=lowerCAmelCase_ , capture_output=lowerCAmelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
_snake_case = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
_snake_case = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
_snake_case = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_snake_case = self.get_env()
_snake_case = subprocess.run(lowerCAmelCase_ , env=lowerCAmelCase_ , check=lowerCAmelCase_ , capture_output=lowerCAmelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# next emulate no network
_snake_case = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_snake_case = '1'
_snake_case = subprocess.run(lowerCAmelCase_ , env=lowerCAmelCase_ , check=lowerCAmelCase_ , capture_output=lowerCAmelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )
    @require_torch
    def test_offline_mode_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
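# A minimal, self-contained sketch (editor's addition, not part of the original test file) of the
# offline-mode mechanism exercised above: TRANSFORMERS_OFFLINE=1 must be set in the environment
# *before* `transformers` is imported, after which `from_pretrained` resolves every file from the
# local cache instead of the Hub. Requires `transformers` installed and the tiny model cached.
if __name__ == "__main__":
    import subprocess
    import sys

    demo = "\n".join(
        [
            'import os; os.environ["TRANSFORMERS_OFFLINE"] = "1"',  # must precede the import below
            "from transformers import BertConfig",
            'BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")',  # cache hit or error
            'print("loaded from cache")',
        ]
    )
    print(subprocess.run([sys.executable, "-c", demo], capture_output=True).returncode)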
| 42
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
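# Usage sketch (editor's addition): round-tripping text through the SentencePiece-backed
# tokenizer above. The checkpoint name comes from PRETRAINED_VOCAB_FILES_MAP; downloading it
# requires network access.
if __name__ == "__main__":
    tokenizer = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    pieces = tokenizer._tokenize("Hello world")  # SentencePiece subword pieces
    ids = [tokenizer._convert_token_to_id(p) for p in pieces]
    print(pieces, ids, tokenizer.convert_tokens_to_string(pieces))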
| 110
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( unittest.TestCase ):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids
        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
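# Editor's note: `mtf_score` reproduces the Mesh-TensorFlow convention of reporting the *total*
# negative log-likelihood of the labels: HF's `loss` is the mean cross-entropy per label token,
# so multiplying by the label length and negating recovers it. For example, if the label
# sequence had 5 tokens, a mean loss of 16.98254 would give mtf_score = -(5 * 16.98254) = -84.9127.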
| 42
| 0
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__UpperCamelCase : List[str] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__UpperCamelCase : Optional[int] = direct_transformers_import(PATH_TO_TRANSFORMERS)
__UpperCamelCase : Dict = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__UpperCamelCase : List[str] = re.compile(R'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
__UpperCamelCase : List[str] = {
"DecisionTransformerConfig",
"EncoderDecoderConfig",
"MusicgenConfig",
"RagConfig",
"SpeechEncoderDecoderConfig",
"TimmBackboneConfig",
"VisionEncoderDecoderConfig",
"VisionTextDualEncoderConfig",
"LlamaConfig",
}
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Tuple = None
# source code of `config_class`
SCREAMING_SNAKE_CASE : Dict = inspect.getsource(__A )
SCREAMING_SNAKE_CASE : str = _re_checkpoint.findall(__A )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
SCREAMING_SNAKE_CASE : List[Any] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
SCREAMING_SNAKE_CASE : Optional[Any] = f"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
SCREAMING_SNAKE_CASE : Tuple = ckpt_name
break
return checkpoint
def A ( ):
SCREAMING_SNAKE_CASE : List[str] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
SCREAMING_SNAKE_CASE : Dict = get_checkpoint_from_config_class(__A )
SCREAMING_SNAKE_CASE : Optional[Any] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__A )
if len(__A ) > 0:
SCREAMING_SNAKE_CASE : Optional[Any] = '''\n'''.join(sorted(__A ) )
raise ValueError(f"""The following configurations don\'t contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
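# Quick self-check (editor's addition) of the regex used above: it captures the checkpoint name
# and its Hub link from a markdown-style docstring reference.
if __name__ == "__main__":
    _demo = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
    assert _re_checkpoint.findall(_demo) == [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]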
| 182
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[int] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42
| 0
|
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()
def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()
@contextmanager
def hide():
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
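# Usage sketch (editor's addition): `hide()` hides the terminal cursor for the duration of the
# block and restores it afterwards, even if the body raises.
if __name__ == "__main__":
    import time

    with hide():
        time.sleep(1)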
| 83
|
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F'''{solution() = }''')
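# Editor's note: a tiny illustration of the XOR-cipher property the search above relies on.
# Applying the same key twice restores the plaintext, so a key candidate that yields only
# printable characters *and* common English words is almost certainly correct.
if __name__ == "__main__":
    _msg = [ord(ch) for ch in "secret"]
    _key = (ord("a"), ord("b"), ord("c"))
    _enc = [c ^ k for c, k in zip(_msg, cycle(_key))]
    assert try_key(_enc, _key) == "secret"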
| 42
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class __A (_lowerCamelCase):
'''simple docstring'''
__lowercase: str = 42
class __A (_lowerCamelCase , _lowerCamelCase):
'''simple docstring'''
@register_to_config
def __init__( self : List[str] , UpperCAmelCase_ : List[str] = 65_536 , UpperCAmelCase_ : List[str] = None , UpperCAmelCase_ : List[str] = 2 , UpperCAmelCase_ : Any = 2 , UpperCAmelCase_ : Dict = 0 , UpperCAmelCase_ : int = "fourier" , UpperCAmelCase_ : Dict = True , UpperCAmelCase_ : int = False , UpperCAmelCase_ : List[Any] = 0.0 , UpperCAmelCase_ : List[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , UpperCAmelCase_ : Union[str, Any] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , UpperCAmelCase_ : Dict = "UNetMidBlock1D" , UpperCAmelCase_ : int = None , UpperCAmelCase_ : Any = (32, 32, 64) , UpperCAmelCase_ : str = None , UpperCAmelCase_ : Dict = 8 , UpperCAmelCase_ : Union[str, Any] = 1 , UpperCAmelCase_ : Optional[int] = False , ) ->Any:
"""simple docstring"""
super().__init__()
snake_case_ = sample_size
# time
if time_embedding_type == "fourier":
snake_case_ = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCAmelCase_ , log=lowerCAmelCase_ , flip_sin_to_cos=lowerCAmelCase_ )
snake_case_ = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
snake_case_ = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCAmelCase_ , downscale_freq_shift=lowerCAmelCase_ )
snake_case_ = block_out_channels[0]
if use_timestep_embedding:
snake_case_ = block_out_channels[0] * 4
snake_case_ = TimestepEmbedding(
in_channels=lowerCAmelCase_ , time_embed_dim=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , out_dim=block_out_channels[0] , )
snake_case_ = nn.ModuleList([] )
snake_case_ = None
snake_case_ = nn.ModuleList([] )
snake_case_ = None
# down
snake_case_ = in_channels
for i, down_block_type in enumerate(lowerCAmelCase_ ):
snake_case_ = output_channel
snake_case_ = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
snake_case_ = i == len(lowerCAmelCase_ ) - 1
snake_case_ = get_down_block(
lowerCAmelCase_ , num_layers=lowerCAmelCase_ , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCAmelCase_ )
# mid
snake_case_ = get_mid_block(
lowerCAmelCase_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCAmelCase_ , add_downsample=lowerCAmelCase_ , )
# up
snake_case_ = list(reversed(lowerCAmelCase_ ) )
snake_case_ = reversed_block_out_channels[0]
if out_block_type is None:
snake_case_ = out_channels
else:
snake_case_ = block_out_channels[0]
for i, up_block_type in enumerate(lowerCAmelCase_ ):
snake_case_ = output_channel
snake_case_ = (
reversed_block_out_channels[i + 1] if i < len(lowerCAmelCase_ ) - 1 else final_upsample_channels
)
snake_case_ = i == len(lowerCAmelCase_ ) - 1
snake_case_ = get_up_block(
lowerCAmelCase_ , num_layers=lowerCAmelCase_ , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCAmelCase_ )
snake_case_ = output_channel
# out
snake_case_ = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
snake_case_ = get_out_block(
out_block_type=lowerCAmelCase_ , num_groups_out=lowerCAmelCase_ , embed_dim=block_out_channels[0] , out_channels=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , fc_dim=block_out_channels[-1] // 4 , )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any = True , ) ->Any:
"""simple docstring"""
snake_case_ = timestep
if not torch.is_tensor(lowerCAmelCase_ ):
snake_case_ = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCAmelCase_ ) and len(timesteps.shape ) == 0:
snake_case_ = timesteps[None].to(sample.device )
snake_case_ = self.time_proj(lowerCAmelCase_ )
if self.config.use_timestep_embedding:
snake_case_ = self.time_mlp(lowerCAmelCase_ )
else:
snake_case_ = timestep_embed[..., None]
snake_case_ = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
snake_case_ = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
snake_case_ = ()
for downsample_block in self.down_blocks:
snake_case_ , snake_case_ = downsample_block(hidden_states=lowerCAmelCase_ , temb=lowerCAmelCase_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
snake_case_ = self.mid_block(lowerCAmelCase_ , lowerCAmelCase_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
snake_case_ = down_block_res_samples[-1:]
snake_case_ = down_block_res_samples[:-1]
snake_case_ = upsample_block(lowerCAmelCase_ , res_hidden_states_tuple=lowerCAmelCase_ , temb=lowerCAmelCase_ )
# 5. post-process
if self.out_block:
snake_case_ = self.out_block(lowerCAmelCase_ , lowerCAmelCase_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCAmelCase_ )
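# Editor's note on the forward pass above: the timestep embedding is broadcast onto the sample,
# each down block stores its residual states, the mid block processes the bottleneck, and each
# up block consumes the matching residuals (classic UNet skip connections) before the optional
# out block post-processes the result.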
| 347
|
'''simple docstring'''
def solution(limit: int = 1_000_000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 42
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
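# Editor's note: `_LazyModule` defers the heavy `modeling_*` imports until an attribute is first
# touched, so `import transformers.models.xlnet` stays cheap; the TYPE_CHECKING branch above
# exists purely so static type checkers see the real symbols. A minimal sketch of the idea
# (editor's addition; names here are illustrative, not the transformers implementation):
#
#   import importlib, types
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._import_structure = import_structure
#       def __getattr__(self, attr):
#           for module, symbols in self._import_structure.items():
#               if attr in symbols:
#                   return getattr(importlib.import_module(f".{module}", self.__name__), attr)
#           raise AttributeError(attr)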
| 317
|
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F'''{solution() = }''')
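# Editor's note: Euclid's formula with m > n generates the primitive triple
# (m^2 - n^2, 2mn, m^2 + n^2), whose perimeter is 2m(m + n); e.g. m=2, n=1 gives (3, 4, 5)
# with perimeter 12 = 2*2*(2+1). The sieve above counts perimeters hit exactly once.
if __name__ == "__main__":
    m, n = 2, 1
    assert (m * m - n * n, 2 * m * n, m * m + n * n) == (3, 4, 5)
    assert 2 * m * (m + n) == 3 + 4 + 5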
| 42
| 0
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
lowerCAmelCase_ = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )
    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
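# Example invocation (editor's addition; the run id, token, and paths are placeholders):
#   python utils/extract_warnings.py --workflow_run_id 12345 --output_dir /tmp/warnings \
#       --token <github-token> --targets DeprecationWarning,UserWarning
# `selected_warnings.json` then contains the sorted, de-duplicated warning bodies.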
| 308
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowercase : Optional[Any] = False
class __UpperCAmelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images
        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 42
| 0
|
"""simple docstring"""
def catalan_number(number: int) -> int:
    # successive terms follow the recurrence C(i) = C(i - 1) * (4i - 2) / (i + 1)
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
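# Quick check (editor's addition; `catalan_number` is the name given to the renamed function
# above): for number = 1..5 it returns 1, 1, 2, 5, 14, the start of the Catalan sequence.
if __name__ == "__main__":
    assert [catalan_number(i) for i in range(1, 6)] == [1, 1, 2, 5, 14]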
| 291
|
'''simple docstring'''
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(F'''{solution() = }''')
| 42
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 311
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowercase : str = logging.get_logger(__name__)
lowercase : Union[str, Any] = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"
    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()
    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
    )
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    recursively_load_weights(model, hf_model, is_finetuned)
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase : int = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
lowercase : Union[str, Any] = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
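# Example invocation (editor's addition; all paths are placeholders):
#   python convert_sew_checkpoint.py --checkpoint_path sew.pt --pytorch_dump_folder_path ./sew-hf \
#       --dict_path ./dict.ltr.txt --is_finetuned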
| 42
| 0
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(_lowerCamelCase ) , "Tatoeba directory does not exist." )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)
@slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])
@slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 108
|
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : int = logging.get_logger(__name__)
lowercase : Union[str, Any] = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class __UpperCAmelCase ( _lowerCamelCase ):
__lowercase = """xlnet"""
__lowercase = ["""mems"""]
__lowercase = {
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__(
        self,
        vocab_size=32_000,
        d_model=1_024,
        n_layer=24,
        n_head=16,
        d_inner=4_096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 42
| 0
|
"""simple docstring"""
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}


def next_term(a_i, k, i, n):
    # ds_b is the digit sum of the high part b, c the low part, with a(i) = b * 10^k + c
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))
    diff, dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(F'''{solution() = }''')
| 315
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( unittest.TestCase ):
@slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 42
| 0
|
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__snake_case = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image, lang, tesseract_config=None):
    """Applies Tesseract OCR on a document image and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class __lowerCamelCase (_lowerCamelCase ):
_lowercase = ["""pixel_values"""]
def __init__( self: List[str],A_: Any = True,A_: List[str] = None,A_: Tuple = PILImageResampling.BILINEAR,A_: Dict = True,A_: List[str] = None,A_: int = "",**A_: Tuple,):
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
__UpperCamelCase = size if size is not None else {'height': 224, 'width': 224}
__UpperCamelCase = get_size_dict(lowerCAmelCase_ )
__UpperCamelCase = do_resize
__UpperCamelCase = size
__UpperCamelCase = resample
__UpperCamelCase = apply_ocr
__UpperCamelCase = ocr_lang
__UpperCamelCase = tesseract_config
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess an image or batch of images, optionally running Tesseract OCR."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
return data
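
# Minimal usage sketch (added for illustration; "document.png" is a placeholder path,
# and running it requires Pillow, pytesseract and a local Tesseract install):
if __name__ == "__main__":
    from PIL import Image

    processor = LayoutLMv2ImageProcessor()  # apply_ocr=True by default
    document = Image.open("document.png").convert("RGB")
    features = processor.preprocess(document, return_tensors="np")
    # pixel_values are resized to 224x224 and flipped to BGR; words/boxes hold the
    # OCR output, with boxes normalized to the 0-1000 range by normalize_box above.
    print(features["pixel_values"].shape, features["words"], features["boxes"])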
| 310
|
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 42
| 0
|
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor ranging from 0 to 1, outputs a bit tensor ranging from -1 to 1."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    """Expects bits from -1 to 1, outputs an image tensor from 0 to 1."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
return (dec / 255).clamp(0.0 , 1.0 )
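
# Round-trip sanity sketch (added; not part of the original pipeline). Pixels are
# quantized to 8-bit integers before the bit decomposition, so decoding recovers the
# input up to 1/255 quantization error:
#
#     img = torch.rand(1, 3, 4, 4)
#     assert torch.allclose(img, bits_to_decimal(decimal_to_bits(img)), atol=1 / 255)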
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance**0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def A ( self , _lowercase , _lowercase , _lowercase , _lowercase="epsilon" , _lowercase=None , _lowercase = True , ):
SCREAMING_SNAKE_CASE : int = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = torch.split(__A , sample.shape[1] , dim=1 )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = None
# 1. compute alphas, betas
SCREAMING_SNAKE_CASE : Dict = self.alphas_cumprod[t]
SCREAMING_SNAKE_CASE : Optional[int] = self.alphas_cumprod[t - 1] if t > 0 else self.one
SCREAMING_SNAKE_CASE : int = 1 - alpha_prod_t
SCREAMING_SNAKE_CASE : Tuple = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
SCREAMING_SNAKE_CASE : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
SCREAMING_SNAKE_CASE : int = model_output
else:
raise ValueError(f"""Unsupported prediction_type {prediction_type}.""" )
# 3. Clip "predicted x_0"
SCREAMING_SNAKE_CASE : List[str] = self.bit_scale
if self.config.clip_sample:
SCREAMING_SNAKE_CASE : Optional[int] = torch.clamp(__A , -scale , __A )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
SCREAMING_SNAKE_CASE : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
SCREAMING_SNAKE_CASE : int = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
SCREAMING_SNAKE_CASE : Optional[int] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
SCREAMING_SNAKE_CASE : Tuple = 0
if t > 0:
SCREAMING_SNAKE_CASE : Any = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=__A ).to(model_output.device )
SCREAMING_SNAKE_CASE : str = (self._get_variance(__A , predicted_variance=__A ) ** 0.5) * noise
SCREAMING_SNAKE_CASE : str = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=__A , pred_original_sample=__A )
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # bind the bit-aware step function defined above as the scheduler's step method
        step_fn = ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        scheduler.step = step_fn.__get__(scheduler)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
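
# Usage sketch (added; the UNet construction is elided because no checkpoint is named
# in this file -- a model trained on bit-encoded images with in_channels = 24, i.e.
# 3 RGB channels x 8 bits, is assumed):
#
#     from diffusers import DDIMScheduler
#     pipe = BitDiffusion(unet, DDIMScheduler(), bit_scale=1.0)
#     image = pipe(height=64, width=64, num_inference_steps=50).images[0]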
| 182
|
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
lowercase : List[str] = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f'upsamples.{i}.1.weight_g']
        hf_model.upsampler[i].weight_v.data = checkpoint[f'upsamples.{i}.1.weight_v']
        hf_model.upsampler[i].bias.data = checkpoint[f'upsamples.{i}.1.bias']

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_g']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_v']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f'blocks.{i}.convs1.{j}.1.bias']

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_g']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_v']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f'blocks.{i}.convs2.{j}.1.bias']

    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint['model']['generator'], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print('Pushing to the hub...')
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
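
# Example invocation (added; the file names below are placeholders, not values from
# this script):
#   python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan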
| 42
| 0
|
'''simple docstring'''
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input('Enter image url: ').strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
with open(file_name, 'wb') as fp:
fp.write(image_data)
print(F"""Done. Image saved to disk as {file_name}.""")
| 83
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", double_z=True):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(down_block_type, num_layers=self.layers_per_block, in_channels=input_channel, out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1e-6, downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=None)
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default", attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None)

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False
    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class Decoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", norm_type="group"):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default" if norm_type == "group" else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=temb_channels)

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(up_block_type, num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, prev_output_channel=None, add_upsample=not is_final_block, resnet_eps=1e-6, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=temb_channels, resnet_time_scale_shift=norm_type)
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False
    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    """Discretization bottleneck of a VQ-VAE: maps continuous latents to their nearest codebook entry."""

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifies (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator=None):
        # make sure sample is on the same device as the parameters and has the same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
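
# Small sanity sketch (added, not in the original module): a VAE encoder emits 2*C
# channels that are split into mean and log-variance, and the KL of a distribution
# against itself is zero.
if __name__ == "__main__":
    params = torch.randn(1, 8, 4, 4)  # interpreted as 4 mean + 4 logvar channels
    dist = DiagonalGaussianDistribution(params)
    assert dist.sample().shape == dist.mean.shape
    assert float(dist.kl(dist).sum()) == 0.0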
| 42
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2_048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
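
# Illustrative sketch (added): because of the fairseq alignment documented above,
# every SentencePiece id is shifted by fairseq_offset == 1 and "<pad>" is pinned to 1.
#
#     tok = XGLMTokenizer("sentencepiece.bpe.model")  # placeholder path to an spm model
#     assert tok.fairseq_tokens_to_ids["<pad>"] == 1
#     assert tok._convert_token_to_id("▁de") == tok.sp_model.PieceToId("▁de") + tok.fairseq_offset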
| 347
|
'''simple docstring'''
def is_even(number: int) -> bool:
    """
    Return True if the input integer is even (odd integers always have the lowest bit set).

    >>> is_even(1)
    False
    >>> is_even(4)
    True
    """
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87
|
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_states = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_states)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 42
| 0
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Class to contain the entire pipeline for the SHA1 hashing algorithm."""

    def __init__(self, data):
        """Initiates the variables data and h, the list of five 32-bit initial hash words."""
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotates the 32-bit integer n by b bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Pads the input message so that the padded data is a multiple of 64 bytes (512 bits)."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """Returns a list of bytestrings, each of length 64."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Unpacks a 64-byte block to 16 integers and expands them to 80 via rotations."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """Pads the data, splits it into blocks and runs the 80-round compression per block."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
| 317
|
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n]"""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the frequency (magnitude) response of a filter via its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel('Gain (dB)')

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the phase response of a filter via its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel('Phase shift (Radians)')
    plt.plot(np.unwrap(fft_phase, -2 * pi))
    plt.show()
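
# Usage sketch (added; the pass-through filter below is only an illustration used to
# exercise the plotting helpers, it is not part of the original module):
if __name__ == "__main__":

    class PassThrough:
        def process(self, sample: float) -> float:
            return sample  # identity filter: flat 0 dB magnitude response

    show_frequency_response(PassThrough(), 48000)
    show_phase_response(PassThrough(), 48000)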
| 42
| 0
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
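
# Usage sketch (added; "BAAI/AltCLIP" is an assumed checkpoint id for illustration):
#
#     processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#     inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#     # inputs now carries input_ids/attention_mask from the tokenizer and
#     # pixel_values from the CLIP image processor.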
| 308
|
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer='zeros', trainable=True, name='cluster_weight')
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer='zeros', trainable=True, name='cluster_bias')

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer='zeros', trainable=True, name=f'out_projs_._{i}')
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer='zeros', trainable=True, name=f'out_layers_._{i}_._weight')
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer='zeros', trainable=True, name=f'out_layers_._{i}_._bias')
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                weight = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer='zeros', trainable=True, name=f'out_projs_._{i}')
                self.out_projs.append(weight)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer='zeros', trainable=True, name=f'out_layers_._{i}_._weight')
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer='zeros', trainable=True, name=f'out_layers_._{i}_._bias')
                self.out_layers.append((weight, bias))
        super().build(input_shape)
    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum('ibd,ed->ibe', y, proj)
        return tf.einsum('ibd,nd->ibn', y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation='mean' if return_mean else '')

        return out
| 42
| 0
|
"""simple docstring"""
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Interpolate and evaluate a polynomial at x0 using Neville's method."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
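
# Worked example (added): interpolating y = x^2 from the samples below at x0 = 2.5
# returns the exact value, since the parabola is recoverable from three points:
#
#     >>> neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 2.5)[0]
#     6.25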
| 291
|
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting each char in the right position against the target."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    """Select the second parent and generate new children."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append(''.join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'\nGeneration: {generation}'
                f'\nTotal Population:{total_population}'
                f'\nBest score: {population_score[0][1]}'
                f'\nBest string: {population_score[0][0]}')

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
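# A reproducible smoke test of the loop above (an illustrative editorial addition,
# not part of the original script): seeding `random` pins down the stochastic
# crossover and mutation choices, and basic() only returns once the target is
# matched exactly, so the final assertion holds regardless of the seed.
#
#   random.seed(42)
#   generation, total, best = basic("hello", list("helo "), debug=False)
#   assert best == "hello"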
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
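# For context: the `sys.modules[__name__] = _LazyModule(...)` line above swaps the
# package module for a proxy so heavy submodules are only imported on first
# attribute access. A minimal sketch of the same deferred-import idea, independent
# of transformers (the class name and example mapping are illustrative, not the
# real transformers API):
import importlib
import types


class MiniLazyModule(types.ModuleType):
    """Defer submodule imports until an attribute is first accessed."""

    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module

    def __getattr__(self, attr):
        try:
            module_name = self._attr_to_module[attr]
        except KeyError:
            raise AttributeError(attr) from None
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value


# e.g. MiniLazyModule("mypkg", {"ChineseCLIPModel": "mypkg.modeling_chinese_clip"})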
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
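# Quick sanity check from the Project Euler statement (editorial addition): for the
# first ten natural numbers the sum of the squares is 385, the square of the sum is
# 3025, and the difference is 3025 - 385 = 2640.
assert solution(10) == 2640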
'''simple docstring'''
def count_divisors(n: int) -> int:
    """Count the divisors of n via its prime factorization."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Return the first triangle number with more than 500 divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
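# Sanity check from the Project Euler statement (editorial addition):
# 28 = 1 + 2 + 3 + 4 + 5 + 6 + 7 is the first triangle number with over five
# divisors (1, 2, 4, 7, 14, 28).
assert count_divisors(28) == 6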
"""simple docstring"""
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
    # fairseq:
    "wmt19-ru-en": {"length_penalty": 1.1},
    "wmt19-en-ru": {"length_penalty": 1.15},
    "wmt19-en-de": {"length_penalty": 1.0},
    "wmt19-de-en": {"length_penalty": 1.1},
    # allenai:
    "wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
    "wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
    "wmt16-en-de-12-1": {"length_penalty": 0.8},
    "wmt19-de-en-6-6-base": {"length_penalty": 0.6},
    "wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d: dict) -> dict:
    # (1) remove the word-breaking symbol, (2) add the word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r'@@$', '', k), v) if k.endswith('@@') else (re.sub(r'$', '</w>', k), v) for k, v in d.items())
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f'{k}</w>']
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f'''Writing results to {pytorch_dump_folder_path}''')

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
    data_name_or_path = '.'
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f'''using checkpoint {checkpoint_file}''')
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs)

    args = vars(chkpt['args']['model'])

    src_lang = args['source_lang']
    tgt_lang = args['target_lang']

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f'''dict.{src_lang}.txt''')
    tgt_dict_file = os.path.join(fsmt_folder_path, f'''dict.{tgt_lang}.txt''')

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, 'vocab-src.json')
    print(f'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''')
    with open(src_vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, 'vocab-tgt.json')
    print(f'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''')
    with open(tgt_vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['merges_file'])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding='utf-8') as fin:
        merges = fin.read()
    merges = re.sub(r' \d+$', '', merges, 0, re.M)  # remove frequency number
    print(f'''Generating {merges_file}''')
    with open(merges_file, 'w', encoding='utf-8') as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, 'config.json')

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f'''need to extend tokenizer to support bpe={args["bpe"]}'''
    assert args["tokenizer"] == "moses", f'''need to extend tokenizer to support tokenizer={args["tokenizer"]}'''

    model_conf = {
        'architectures': ['FSMTForConditionalGeneration'],
        'model_type': 'fsmt',
        'activation_dropout': args['activation_dropout'],
        'activation_function': 'relu',
        'attention_dropout': args['attention_dropout'],
        'd_model': args['decoder_embed_dim'],
        'dropout': args['dropout'],
        'init_std': 0.02,
        'max_position_embeddings': args['max_source_positions'],
        'num_hidden_layers': args['encoder_layers'],
        'src_vocab_size': src_vocab_size,
        'tgt_vocab_size': tgt_vocab_size,
        'langs': [src_lang, tgt_lang],
        'encoder_attention_heads': args['encoder_attention_heads'],
        'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
        'encoder_layerdrop': args['encoder_layerdrop'],
        'encoder_layers': args['encoder_layers'],
        'decoder_attention_heads': args['decoder_attention_heads'],
        'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
        'decoder_layerdrop': args['decoder_layerdrop'],
        'decoder_layers': args['decoder_layers'],
        'bos_token_id': 0,
        'pad_token_id': 1,
        'eos_token_id': 2,
        'is_encoder_decoder': True,
        'scale_embedding': not args['no_scale_embedding'],
        'tie_word_embeddings': args['share_all_embeddings'],
    }

    # good hparam defaults to start with
    model_conf['num_beams'] = 5
    model_conf['early_stopping'] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf['length_penalty'] = best_score_hparams[model_dir]['length_penalty']
    else:
        model_conf['length_penalty'] = 1.0

    print(f'''Generating {fsmt_model_config_file}''')
    with open(fsmt_model_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        'langs': [src_lang, tgt_lang],
        'model_max_length': 1024,
        'do_lower_case': do_lower_case,
    }

    print(f'''Generating {fsmt_tokenizer_config_file}''')
    with open(fsmt_tokenizer_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt['models'][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        'model.model',
        'model.encoder.version',
        'model.decoder.version',
        'model.encoder_embed_tokens.weight',
        'model.decoder_embed_tokens.weight',
        'model.encoder.embed_positions._float_tensor',
        'model.decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f'''Generating {pytorch_weights_dump_path}''')
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print('Conversion is done!')
    print('\nLast step is to upload the files to s3')
    print(f'''cd {data_root}''')
    print(f'''transformers-cli upload {model_dir}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
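# Once converted, the dump folder loads like any other FSMT checkpoint. A minimal
# usage sketch (the local path is illustrative; substitute whatever was passed as
# --pytorch_dump_folder_path):
#
#   from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#
#   tokenizer = FSMTTokenizer.from_pretrained("data/wmt19-en-de")
#   model = FSMTForConditionalGeneration.from_pretrained("data/wmt19-en-de")
#   batch = tokenizer(["Machine learning is great!"], return_tensors="pt")
#   generated = model.generate(**batch, num_beams=5)
#   print(tokenizer.decode(generated[0], skip_special_tokens=True))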
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are
        # changing it while running an external program

        # python one-liner segments
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]

        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # python one-liner segments
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode',
            result.stderr.decode().replace('\n', ''),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
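# Note on the pattern used throughout this class: TRANSFORMERS_OFFLINE is read when
# `transformers` is first imported, so it cannot be toggled inside the already
# running pytest process - hence every scenario shells out to a fresh interpreter.
# A minimal sketch of that pattern outside the test harness (illustrative):
#
#   import os, subprocess, sys
#   env = {**os.environ, "TRANSFORMERS_OFFLINE": "1"}
#   code = "from transformers import BertConfig; print('success')"
#   result = subprocess.run([sys.executable, "-c", code], env=env, capture_output=True)
#   assert b"success" in result.stdout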
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=('AttnDownBlock2D', 'DownBlock2D'),
            up_block_types=('UpBlock2D', 'AttnUpBlock2D'),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D'),
            up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D'),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D'),
            up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D'),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=('AttnDownBlock2D', 'DownBlock2D'),
            up_block_types=('UpBlock2D', 'AttnUpBlock2D'),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype='uint8')[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype='uint8')[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype='uint8')[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype='uint8')[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256')
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype='uint8')[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
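# The shape assertions above encode how AudioDiffusionPipeline round-trips sound:
# Mel renders the waveform as an x_res-by-y_res spectrogram image, so a generated
# image of width W decodes back to (W - 1) * hop_length audio samples, which is
# exactly the `audios` shape checked in both test classes.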
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained('google/mt5-small', return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')

        input_ids = tokenizer('Hello there', return_tensors='pt').input_ids
        labels = tokenizer('Hi I am', return_tensors='pt').input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
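# Why `-(labels.shape[-1] * loss.item())` above: the model returns the mean
# cross-entropy per target token, so multiplying by the target length recovers the
# total negative log-likelihood, and negating it gives the sequence log-likelihood
# that the original Mesh-TensorFlow T5 evaluation reports (hence the name mtf_score):
#
#   log p(y | x) = -sum_t CE_t = -(T * mean_t CE_t)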
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = '\n'.join(articles)
    Path(path).open('w').writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        articles = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / 'scores.json')
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
            """.split()

        with patch.object(sys, 'argv', testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    # test one model quickly to catch simple problems; extensive multi-model
    # testing is done separately as @slow
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    # any extra models should go into the list here - can be slow
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    # testing with 2 models to validate: 1. translation (t5) 2. summarization (mbart)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()

        text = {
            'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
            'de': [
                'Maschinelles Lernen ist großartig, oder?',
                'Ich esse gerne Bananen',
                'Morgen ist wieder ein toller Tag!',
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / 'scores.json')
        reference_path = str(tmp_dir / 'val.target')
        _dump_articles(input_file_name, text['en'])
        _dump_articles(reference_path, text['de'])
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
            """.split()
        testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'])

        with patch.object(sys, 'argv', testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [' num_beams | length_penalty', model, 'Best score args']
            un_expected_strings = ['Info']
            if "translation" in task:
                expected_strings.append('bleu')
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding='utf-8')

    ciphertext = [int(number) for number in data.strip().split(',')]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F'''{solution() = }''')
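# XOR encryption is its own inverse, (p ^ k) ^ k == p, which is why try_key can
# "decrypt" simply by XOR-ing again with the candidate key. A tiny illustration
# (editorial addition; the 3-byte key is arbitrary):
_plain = "abc"
_key = [107, 101, 121]
_cipher = [ord(c) ^ k for c, k in zip(_plain, _key)]
assert "".join(chr(c ^ k) for c, k in zip(_cipher, _key)) == _plain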
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV1ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV1ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
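# Note: the classification head checks for 1001 logits because the original
# TensorFlow MobileNet checkpoints prepend an extra "background" class to the
# 1000 ImageNet labels.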
'''simple docstring'''
def solution(limit: int = 1_000_000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F'''{solution() = }''')
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict


@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
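# The `self.projection_dim or self.hidden_size` in the shape checks above mirrors
# DPR's optional projection layer: with projection_dim=0 the encoders expose the
# raw BERT pooled output (hidden_size), otherwise the pooled output is projected
# down to projection_dim.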
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" , [None, """v2"""] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}'''
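# For example, repo_id="org-name/dataset-name", path="filename with blanks.csv" and
# revision=None resolve to the following URL (quote() percent-encodes the blanks):
# https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv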
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F'''{solution() = }''')
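# Background for the loop above (editorial addition): Euclid's formula generates
# every primitive Pythagorean triple from coprime m > n of opposite parity as
# a = m**2 - n**2, b = 2*m*n, c = m**2 + n**2, so the perimeter is
# a + b + c = 2*m*(m + n), the quantity stepped over per (m, n) pair.
# A tiny check with the (3, 4, 5) triangle:
_m, _n = 2, 1
assert (_m**2 - _n**2, 2 * _m * _n, _m**2 + _n**2) == (3, 4, 5)
assert 2 * _m * (_m + _n) == 3 + 4 + 5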
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig):
    model_type = '''xmod'''
    def __init__(self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , pre_norm=False , adapter_reduction_factor=2 , adapter_layer_norm=False , adapter_reuse_layer_norm=True , ln_before_adapter=True , languages=("en_XX",) , default_language=None , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages )
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
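# A minimal usage sketch (added; assumes the names fixed above are the ones
# exported as transformers.XmodConfig):
# config = XmodConfig(vocab_size=30_522, default_language="en_XX")
# assert config.languages == ["en_XX"]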
| 308
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load(self):
        """simple docstring"""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion' )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = generator.manual_seed(0 )
        new_image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
    def test_inference_text2img(self):
        """simple docstring"""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            'shi-labs/versatile-diffusion' , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 42
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
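# Note on the pattern above (added sketch, not part of the original module):
# swapping `sys.modules[__name__]` for a `_LazyModule` defers the heavy
# torch/TF imports until an attribute is first touched, e.g.:
# import transformers.models.ctrl as ctrl   # cheap, no torch import yet
# model_cls = ctrl.CTRLLMHeadModel           # triggers the real submodule import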
| 291
|
'''simple docstring'''
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'''{solution() = }''')
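# Sanity check (illustrative): for n = 10 the square of the sum is
# 55 ** 2 = 3025 and the sum of the squares is 385, so solution(10) == 2640.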
| 42
| 0
|
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    '''simple docstring'''
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_dataset():
    '''simple docstring'''
    with parallel_backend("spark" ):
        assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError ):
        with parallel_backend("unsupported backend" ):
            map_nested(add_one , lst , num_proc=2 )
    with pytest.raises(ValueError ):
        with parallel_backend("unsupported backend" ):
            map_nested(add_one , lst , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def test_parallel_backend_map_nested(num_proc):
    '''simple docstring'''
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark" ):
        assert map_nested(add_one , s1 , num_proc=num_proc ) == expected_map_nested_s1
        assert map_nested(add_one , s2 , num_proc=num_proc ) == expected_map_nested_s2
        assert map_nested(add_one , s3 , num_proc=num_proc ) == expected_map_nested_s3
        assert map_nested(add_one , s4 , num_proc=num_proc ) == expected_map_nested_s4
        assert map_nested(add_one , s5 , num_proc=num_proc ) == expected_map_nested_s5
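# A minimal sketch of the same API outside pytest (added; assumes pyspark and
# joblibspark are installed, as the decorators above require):
# from datasets.parallel import parallel_backend
# from datasets.utils.py_utils import map_nested
# with parallel_backend("spark"):
#     assert map_nested(add_one, [1, 2, 3], num_proc=2) == [2, 3, 4]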
| 311
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        F' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "weight" in name:
                        weight_type = 'weight'
                    elif "bias" in name:
                        weight_type = 'bias'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'Unused weights: {unused_weights}' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers )
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = 'gelu'
    config.feat_extract_norm = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group'
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers )
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = 'Wav2Vec2FeatureExtractor'
    config.tokenizer_class = 'Wav2Vec2CTCTokenizer'
    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path )
    else:
        config = convert_config(model[0] , is_finetuned )
    model = model[0].eval()
    return_attention_mask = True if config.feat_extract_norm == 'layer' else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , 'vocab.json' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , 'w' , encoding='utf-8' ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=False , )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_model = SEWForCTC(config )
    else:
        hf_model = SEWModel(config )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    recursively_load_weights(model , hf_model , is_finetuned )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
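# Example invocation (hypothetical file names, for illustration only):
# python convert_sew_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path sew_tiny_100k.pt \
#     --pytorch_dump_folder_path ./sew-tiny-hf \
#     --is_finetuned --dict_path ./dict.ltr.txt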
| 42
| 0
|
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    """simple docstring"""
    def __init__(self):
        """simple docstring"""
        self.initialized = False
    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        """simple docstring"""
        if not self.initialized:
            self.retriever = RagRetriever(
                config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
            self.initialized = True
    def init_retrieval(self):
        """simple docstring"""
        self.retriever.index.init_index()
    def retrieve(self, question_hidden_states, n_docs):
        """simple docstring"""
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states , n_docs )
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    """simple docstring"""
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        """simple docstring"""
        if index is not None and index.is_initialized() and len(retrieval_workers ) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py " )
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers ) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config , question_encoder_tokenizer , generator_tokenizer , index )
                    for worker in self.retrieval_workers
                ] )
    def init_retrieval(self):
        """simple docstring"""
        logger.info("initializing retrieval" )
        if len(self.retrieval_workers ) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()
    def retrieve(self, question_hidden_states, n_docs):
        """simple docstring"""
        if len(self.retrieval_workers ) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states , n_docs ) )
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs )
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        """simple docstring"""
        return super(RagRayDistributedRetriever , cls ).get_tokenizers(retriever_name_or_path , indexed_dataset , **kwargs )
    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        """simple docstring"""
        config = kwargs.pop("config" , None ) or RagConfig.from_pretrained(retriever_name_or_path , **kwargs )
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path , config=config )
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size , indexed_dataset )
        else:
            index = cls._build_index(config )
        return cls(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , retrieval_workers=actor_handles , index=index , )
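# A minimal wiring sketch (added; assumes the caller wraps `RayRetriever` as a
# Ray actor, as the RAG example scripts do):
# import ray
# ray.init()
# workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
# retriever = RagRayDistributedRetriever.from_pretrained(
#     "facebook/rag-token-nq", actor_handles=workers)
# retriever.init_retrieval()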
| 108
|
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = """xlnet"""
    keys_to_ignore_at_inference = ["""mems"""]
    attribute_map = {
        """n_token""": """vocab_size""", # Backward compatibility
        """hidden_size""": """d_model""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__(self , vocab_size=3_20_00 , d_model=10_24 , n_layer=24 , n_head=16 , d_inner=40_96 , ff_activation="gelu" , untie_r=True , attn_type="bi" , initializer_range=0.02 , layer_norm_eps=1E-12 , dropout=0.1 , mem_len=5_12 , reuse_len=None , use_mems_eval=True , use_mems_train=False , bi_data=False , clamp_len=-1 , same_length=False , summary_type="last" , summary_use_proj=True , summary_activation="tanh" , summary_last_dropout=0.1 , start_n_top=5 , end_n_top=5 , pad_token_id=5 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(F'\'d_model % n_head\' ({d_model % n_head}) should be equal to 0' )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F'`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})' )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
                ' instead.' , FutureWarning , )
            use_mems_eval = kwargs['use_cache']
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings(self):
        """simple docstring"""
        logger.info(F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        """simple docstring"""
        raise NotImplementedError(
            F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
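# A short usage sketch (illustrative): the attribute_map above lets callers
# read the generic names even though XLNet stores its own:
# config = XLNetConfig(d_model=768, n_layer=12, n_head=12)
# assert config.hidden_size == 768 and config.num_hidden_layers == 12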
| 42
| 0
|
"""simple docstring"""
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    '''simple docstring'''
    # preprocessing the first row
    for i in range(1 , len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1 , len(matrix ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1 , len(matrix ) ):
        for j in range(1 , len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
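# Example (added for illustration): the cheapest monotone down/right path
# through [[1, 3, 1], [1, 5, 1], [4, 2, 1]] is 1 + 3 + 1 + 1 + 1 = 7:
# assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7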
| 315
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        """simple docstring"""
        model = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' )
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]] , dtype=tf.int32 , ) # J'aime le camembert !"
        output = model(input_ids )['last_hidden_state']
        expected_shape = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.float32 , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 42
| 0
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def rename_key(state_dict, old, new):
    """simple docstring"""
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """simple docstring"""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    """simple docstring"""
    prefix = ''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
        in_proj_bias_cross_attn = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.weight'''] = in_proj_weight_cross_attn[:256, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.bias'''] = in_proj_bias_cross_attn[:256]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.weight'''] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.bias'''] = in_proj_bias_cross_attn[256:512]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.weight'''] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.bias'''] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    """simple docstring"""
    width, height = image.size
    current_max_size = max(width , height )
    target_max_size = 8_00 if 'detection' in checkpoint_url else 10_00
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
    return resized_image
def normalize(image):
    """simple docstring"""
    image = F.to_tensor(image )
    image = F.normalize(image , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """simple docstring"""
    logger.info('Converting model...' )
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'model.'
    for key in state_dict.copy().keys():
        if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
            val = state_dict.pop(key )
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        idalabel = {0: 'table', 1: 'table rotated'}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    else:
        config.num_queries = 1_25
        config.num_labels = 6
        idalabel = {
            0: 'table',
            1: 'table column',
            2: 'table row',
            3: 'table column header',
            4: 'table projected row header',
            5: 'table spanning cell',
        }
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    image_processor = DetrImageProcessor(
        format='coco_detection' , max_size=8_00 if 'detection' in checkpoint_url else 10_00 )
    model = TableTransformerForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    # verify our conversion
    filename = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
    file_path = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=filename )
    image = Image.open(file_path ).convert('RGB' )
    pixel_values = normalize(resize(image , checkpoint_url ) ).unsqueeze(0 )
    outputs = model(pixel_values )
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
    else:
        expected_shape = (1, 1_25, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes , atol=1e-4 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model to HF hub
        logger.info('Pushing model to the hub...' )
        model_name = (
            'microsoft/table-transformer-detection'
            if 'detection' in checkpoint_url
            else 'microsoft/table-transformer-structure-recognition'
        )
        model.push_to_hub(model_name )
        image_processor.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
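# Example invocation (illustrative; the script name is whatever this file is
# saved as, and the default --checkpoint_url already points at the detection
# model):
# python convert_table_transformer_checkpoint.py \
#     --pytorch_dump_folder_path ./table-transformer-detection --push_to_hub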
| 310
|
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 42
| 0
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        '''simple docstring'''
        config = {
            '''num_train_timesteps''': 1100,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''noise_sampler_seed''': 0,
        }
        config.update(**kwargs )
        return config
    def test_timesteps(self):
        '''simple docstring'''
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas(self):
        '''simple docstring'''
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules(self):
        '''simple docstring'''
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type(self):
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_no_noise(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875 ) < 1E-2
            assert abs(result_mean.item() - 0.2178705964565277 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406 ) < 1E-2
            assert abs(result_mean.item() - 0.22342906892299652 ) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
            assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
    def test_full_loop_with_v_prediction(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''' )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453 ) < 1E-2
            assert abs(result_mean.item() - 0.16226289014816284 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703 ) < 1E-2
            assert abs(result_mean.item() - 0.16688326001167297 ) < 1E-3
        else:
            assert abs(result_sum.item() - 119.8487548828125 ) < 1E-2
            assert abs(result_mean.item() - 0.1560530662536621 ) < 1E-3
    def test_full_loop_device(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938 ) < 1E-2
            assert abs(result_mean.item() - 0.21805934607982635 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312 ) < 1E-2
            assert abs(result_mean.item() - 0.22342908382415771 ) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
            assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
    def test_full_loop_device_karras_sigmas(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config , use_karras_sigmas=True )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188 ) < 1E-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125 ) < 1E-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
        else:
            assert abs(result_sum.item() - 170.3135223388672 ) < 1E-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
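# A standalone sketch of the denoising loop the tests above exercise (added;
# assumes diffusers with torchsde installed, and `unet` stands in for any
# callable returning a noise residual for (latents, t)):
# scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100)
# scheduler.set_timesteps(10)
# for t in scheduler.timesteps:
#     latents = scheduler.scale_model_input(latents, t)
#     latents = scheduler.step(unet(latents, t), t, latents).prev_sample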
| 182
|
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[F'upsamples.{i}.1.weight_g']
        hf_model.upsampler[i].weight_v.data = checkpoint[F'upsamples.{i}.1.weight_v']
        hf_model.upsampler[i].bias.data = checkpoint[F'upsamples.{i}.1.bias']
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F'blocks.{i}.convs1.{j}.1.weight_g']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F'blocks.{i}.convs1.{j}.1.weight_v']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F'blocks.{i}.convs1.{j}.1.bias']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F'blocks.{i}.convs2.{j}.1.weight_g']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F'blocks.{i}.convs2.{j}.1.weight_v']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F'blocks.{i}.convs2.{j}.1.bias']
    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechT5HifiGanConfig()
    model = SpeechT5HifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint['model']['generator'] , model , config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('Pushing to the hub...' )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
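# Example invocation (hypothetical file names, for illustration only):
# python convert_hifigan_checkpoint.py --checkpoint_path generator.ckpt \
#     --stats_path stats.npy --pytorch_dump_folder_path ./speecht5-hifigan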
| 42
| 0
|
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3):
    if isinstance(number_of_qubits , str ):
        raise TypeError('number of qubits must be a integer.' )
    if number_of_qubits <= 0:
        raise ValueError('number of qubits must be > 0.' )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError('number of qubits must be exact integer.' )
    if number_of_qubits > 10:
        raise ValueError('number of qubits too large to simulate(>10).' )
    qr = QuantumRegister(number_of_qubits , 'qr' )
    cr = ClassicalRegister(number_of_qubits , 'cr' )
    quantum_circuit = QuantumCircuit(qr , cr )
    counter = number_of_qubits
    for i in range(number_of_qubits ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend('qasm_simulator' )
    job = execute(quantum_circuit , backend , shots=10_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
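# Illustrative expectation (added note): starting from |000>, the controlled
# phases act trivially, so the 3-qubit QFT leaves a uniform superposition and
# the 10_000 shots split roughly evenly across the 8 basis states.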
| 83
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __UpperCAmelCase ( _lowerCamelCase ):
__lowercase = 42
class __UpperCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase_=3 , lowerCAmelCase_=3 , lowerCAmelCase_=("DownEncoderBlock2D",) , lowerCAmelCase_=(64,) , lowerCAmelCase_=2 , lowerCAmelCase_=32 , lowerCAmelCase_="silu" , lowerCAmelCase_=True , ):
"""simple docstring"""
super().__init__()
_snake_case = layers_per_block
_snake_case = torch.nn.Convad(
lowerCAmelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_snake_case = None
_snake_case = nn.ModuleList([] )
# down
_snake_case = block_out_channels[0]
for i, down_block_type in enumerate(lowerCAmelCase_ ):
_snake_case = output_channel
_snake_case = block_out_channels[i]
_snake_case = i == len(lowerCAmelCase_ ) - 1
_snake_case = get_down_block(
lowerCAmelCase_ , num_layers=self.layers_per_block , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=lowerCAmelCase_ , resnet_groups=lowerCAmelCase_ , attention_head_dim=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , )
self.down_blocks.append(lowerCAmelCase_ )
# mid
_snake_case = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , )
# out
_snake_case = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCAmelCase_ , eps=1E-6 )
_snake_case = nn.SiLU()
_snake_case = 2 * out_channels if double_z else out_channels
_snake_case = nn.Convad(block_out_channels[-1] , lowerCAmelCase_ , 3 , padding=1 )
_snake_case = False
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = x
_snake_case = self.conv_in(lowerCAmelCase_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCAmelCase_ ):
def custom_forward(*lowerCAmelCase_ ):
return module(*lowerCAmelCase_ )
return custom_forward
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
_snake_case = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
# middle
_snake_case = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
else:
for down_block in self.down_blocks:
_snake_case = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ )
# middle
_snake_case = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCAmelCase_ )
else:
# down
for down_block in self.down_blocks:
_snake_case = down_block(lowerCAmelCase_ )
# middle
_snake_case = self.mid_block(lowerCAmelCase_ )
# post-process
_snake_case = self.conv_norm_out(lowerCAmelCase_ )
_snake_case = self.conv_act(lowerCAmelCase_ )
_snake_case = self.conv_out(lowerCAmelCase_ )
return sample
class __UpperCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase_=3 , lowerCAmelCase_=3 , lowerCAmelCase_=("UpDecoderBlock2D",) , lowerCAmelCase_=(64,) , lowerCAmelCase_=2 , lowerCAmelCase_=32 , lowerCAmelCase_="silu" , lowerCAmelCase_="group" , ):
"""simple docstring"""
super().__init__()
_snake_case = layers_per_block
_snake_case = nn.Convad(
lowerCAmelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_snake_case = None
_snake_case = nn.ModuleList([] )
_snake_case = in_channels if norm_type == 'spatial' else None
# mid
_snake_case = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , )
# up
_snake_case = list(reversed(lowerCAmelCase_ ) )
_snake_case = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCAmelCase_ ):
_snake_case = output_channel
_snake_case = reversed_block_out_channels[i]
_snake_case = i == len(lowerCAmelCase_ ) - 1
_snake_case = get_up_block(
lowerCAmelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , prev_output_channel=lowerCAmelCase_ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase_ , resnet_groups=lowerCAmelCase_ , attention_head_dim=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , resnet_time_scale_shift=lowerCAmelCase_ , )
self.up_blocks.append(lowerCAmelCase_ )
_snake_case = output_channel
# out
if norm_type == "spatial":
_snake_case = SpatialNorm(block_out_channels[0] , lowerCAmelCase_ )
else:
_snake_case = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCAmelCase_ , eps=1E-6 )
_snake_case = nn.SiLU()
_snake_case = nn.Convad(block_out_channels[0] , lowerCAmelCase_ , 3 , padding=1 )
_snake_case = False
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ):
"""simple docstring"""
_snake_case = z
_snake_case = self.conv_in(lowerCAmelCase_ )
_snake_case = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCAmelCase_ ):
def custom_forward(*lowerCAmelCase_ ):
return module(*lowerCAmelCase_ )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
_snake_case = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase_ , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
_snake_case = sample.to(lowerCAmelCase_ )
# up
for up_block in self.up_blocks:
_snake_case = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
else:
# middle
_snake_case = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = sample.to(lowerCAmelCase_ )
# up
for up_block in self.up_blocks:
_snake_case = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , lowerCAmelCase_ )
else:
# middle
_snake_case = self.mid_block(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = sample.to(lowerCAmelCase_ )
# up
for up_block in self.up_blocks:
_snake_case = up_block(lowerCAmelCase_ , lowerCAmelCase_ )
# post-process
if latent_embeds is None:
_snake_case = self.conv_norm_out(lowerCAmelCase_ )
else:
_snake_case = self.conv_norm_out(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.conv_act(lowerCAmelCase_ )
_snake_case = self.conv_out(lowerCAmelCase_ )
return sample
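# A minimal usage sketch (assumption: the classes above are obfuscated copies
# of `Encoder`/`Decoder` from `diffusers.models.vae`; names and shapes below
# refer to those upstream classes with their default single-block config):
#
#   import torch
#   from diffusers.models.vae import Encoder, Decoder
#
#   enc, dec = Encoder(), Decoder()
#   x = torch.randn(1, 3, 32, 32)
#   moments = enc(x)                              # 2x latent channels: mean and logvar
#   mean, logvar = torch.chunk(moments, 2, dim=1)
#   recon = dec(mean)                             # decode the mean latent
#   assert recon.shape == x.shape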
class __UpperCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_="random" , lowerCAmelCase_=False , lowerCAmelCase_=True ):
"""simple docstring"""
super().__init__()
_snake_case = n_e
_snake_case = vq_embed_dim
_snake_case = beta
_snake_case = legacy
_snake_case = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_snake_case = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
_snake_case = self.used.shape[0]
_snake_case = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_snake_case = self.re_embed
_snake_case = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
_snake_case = n_e
_snake_case = sane_index_shape
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = inds.shape
assert len(lowerCAmelCase_ ) > 1
_snake_case = inds.reshape(ishape[0] , -1 )
_snake_case = self.used.to(lowerCAmelCase_ )
_snake_case = (inds[:, :, None] == used[None, None, ...]).long()
_snake_case = match.argmax(-1 )
_snake_case = match.sum(2 ) < 1
if self.unknown_index == "random":
_snake_case = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_snake_case = self.unknown_index
return new.reshape(lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = inds.shape
assert len(lowerCAmelCase_ ) > 1
_snake_case = inds.reshape(ishape[0] , -1 )
_snake_case = self.used.to(lowerCAmelCase_ )
if self.re_embed > self.used.shape[0]: # extra token
_snake_case = 0 # simply set to zero
_snake_case = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCAmelCase_ )
return back.reshape(lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = z.permute(0 , 2 , 3 , 1 ).contiguous()
_snake_case = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_snake_case = torch.argmin(torch.cdist(lowerCAmelCase_ , self.embedding.weight ) , dim=1 )
_snake_case = self.embedding(lowerCAmelCase_ ).view(z.shape )
_snake_case = None
_snake_case = None
# compute loss for embedding
if not self.legacy:
_snake_case = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_snake_case = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_snake_case = z + (z_q - z).detach()
# reshape back to match original input shape
_snake_case = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_snake_case = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_snake_case = self.remap_to_used(lowerCAmelCase_ )
_snake_case = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_snake_case = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if self.remap is not None:
_snake_case = indices.reshape(shape[0] , -1 ) # add batch axis
_snake_case = self.unmap_to_all(lowerCAmelCase_ )
_snake_case = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_snake_case = self.embedding(lowerCAmelCase_ )
if shape is not None:
_snake_case = z_q.view(lowerCAmelCase_ )
# reshape back to match original input shape
_snake_case = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
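# Illustrative sketch of the codebook lookup above (hypothetical standalone
# tensors, not the class API): quantization is a nearest-neighbour search over
# the embedding rows, and the straight-through line `z + (z_q - z).detach()`
# makes the forward pass return z_q while gradients flow to z as if the
# non-differentiable argmin were the identity:
#
#   import torch
#   codebook = torch.randn(16, 4)                  # n_e=16 codes of dim 4
#   z = torch.randn(10, 4, requires_grad=True)     # flattened latents
#   idx = torch.argmin(torch.cdist(z, codebook), dim=1)
#   z_q = codebook[idx]
#   z_q = z + (z_q - z).detach()                   # straight-through estimator
#   z_q.sum().backward()                           # z.grad is all ones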
class __UpperCAmelCase ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=False ):
"""simple docstring"""
_snake_case = parameters
_snake_case , _snake_case = torch.chunk(lowerCAmelCase_ , 2 , dim=1 )
_snake_case = torch.clamp(self.logvar , -30.0 , 20.0 )
_snake_case = deterministic
_snake_case = torch.exp(0.5 * self.logvar )
_snake_case = torch.exp(self.logvar )
if self.deterministic:
_snake_case = _snake_case = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowerCamelCase ( self , lowerCAmelCase_ = None ):
"""simple docstring"""
_snake_case = randn_tensor(
self.mean.shape , generator=lowerCAmelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
_snake_case = self.mean + self.std * sample
return x
def lowerCamelCase ( self , lowerCAmelCase_=None ):
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=[1, 2, 3] ):
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
_snake_case = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
return self.mean
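# Sanity-check sketch for the `kl` method above (assumption: the class mirrors
# diffusers' DiagonalGaussianDistribution). With no `other` argument it returns
# the closed form KL(N(mu, sigma^2) || N(0, 1)) = 0.5 * sum(mu^2 + sigma^2 - 1
# - log(sigma^2)), which torch.distributions reproduces numerically:
#
#   import torch
#   from torch.distributions import Normal, kl_divergence
#
#   params = torch.randn(1, 8, 4, 4)   # 4 mean channels + 4 logvar channels
#   dist = DiagonalGaussianDistribution(params)
#   ref = kl_divergence(Normal(dist.mean, dist.std), Normal(0.0, 1.0)).sum(dim=[1, 2, 3])
#   assert torch.allclose(dist.kl(), ref, atol=1e-5)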
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__SCREAMING_SNAKE_CASE : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
__SCREAMING_SNAKE_CASE : int = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , """schedulers/""" ) )
snake_case_ = self.diffusers_dir
shutil.copy(
os.path.join(lowerCAmelCase_ , """src/diffusers/schedulers/scheduling_ddpm.py""" ) , os.path.join(self.diffusers_dir , """schedulers/scheduling_ddpm.py""" ) , )
def lowerCAmelCase ( self : Union[str, Any] ) ->Dict:
"""simple docstring"""
snake_case_ = """src/diffusers"""
shutil.rmtree(self.diffusers_dir )
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any]=None ) ->Tuple:
"""simple docstring"""
snake_case_ = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
snake_case_ = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        snake_case_ = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 )
snake_case_ = black.format_str(lowerCAmelCase_ , mode=lowerCAmelCase_ )
snake_case_ = os.path.join(self.diffusers_dir , """new_code.py""" )
with open(lowerCAmelCase_ , """w""" , newline="""\n""" ) as f:
f.write(lowerCAmelCase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCAmelCase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCAmelCase_ )
with open(lowerCAmelCase_ , """r""" ) as f:
self.assertTrue(f.read() , lowerCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , lowerCAmelCase_ , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , re.sub("""DDPM""" , """Test""" , lowerCAmelCase_ ) , )
# Copy consistency with a really long name
snake_case_ = """TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub("""Bert""" , lowerCAmelCase_ , lowerCAmelCase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , lowerCAmelCase_ , overwrite_result=re.sub("""DDPM""" , """Test""" , lowerCAmelCase_ ) , )
'''simple docstring'''
def is_even(number: int) -> bool:
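    """Return True if `number` is even, False otherwise.

    >>> is_even(4)
    True
    >>> is_even(7)
    False
    """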
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=4_00, do_resize=True, size=None, apply_ocr=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class snake_case_ ( _lowerCamelCase ,unittest.TestCase ):
__A : Any = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __UpperCamelCase ( self : List[Any] ) -> int:
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)
@property
def __UpperCamelCase ( self : Tuple ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : str ) -> Optional[Any]:
lowercase__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , "size" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , "apply_ocr" ) )
def __UpperCamelCase ( self : List[Any] ) -> str:
lowercase__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
lowercase__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
pass
def __UpperCamelCase ( self : Tuple ) -> Dict:
lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , Image.Image )
# Test not batched input
lowercase__ : str = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , lowerCAmelCase_ )
self.assertIsInstance(encoding.boxes , lowerCAmelCase_ )
# Test batched
lowercase__ : Optional[Any] = image_processing(lowerCAmelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __UpperCamelCase ( self : Any ) -> Any:
lowercase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , numpify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , np.ndarray )
# Test not batched input
lowercase__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowercase__ : str = image_processing(lowerCAmelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
lowercase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , torch.Tensor )
# Test not batched input
lowercase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowercase__ : Union[str, Any] = image_processing(lowerCAmelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __UpperCamelCase ( self : Tuple ) -> List[str]:
lowercase__ : int = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowercase__ : Any = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
lowercase__ : Union[str, Any] = Image.open(ds[0]["file"] ).convert("RGB" )
lowercase__ : List[str] = image_processing(lowerCAmelCase_ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowercase__ : List[str] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
lowercase__ : List[Any] = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], 
[6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowerCAmelCase_ )
self.assertListEqual(encoding.boxes , lowerCAmelCase_ )
# with apply_OCR = False
lowercase__ : Optional[int] = LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase_ )
lowercase__ : Any = image_processing(lowerCAmelCase_ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_states = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_states)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
a__ = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    """A pytorch-lightning module for token classification (NER)."""

    mode = "token-classification"
def __init__( self : Optional[int] , lowerCAmelCase : Tuple) -> int:
"""simple docstring"""
if type(lowerCAmelCase_) == dict:
_snake_case : List[Any] = Namespace(**lowerCAmelCase_)
_snake_case : int = import_module("""tasks""")
try:
_snake_case : int = getattr(lowerCAmelCase_ , hparams.task_type)
_snake_case : int = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''')
_snake_case : Optional[int] = self.token_classification_task.get_labels(hparams.labels)
_snake_case : Dict = CrossEntropyLoss().ignore_index
super().__init__(lowerCAmelCase_ , len(self.labels) , self.mode)
def UpperCamelCase_ ( self : Dict , **lowerCAmelCase : Optional[Any]) -> List[Any]:
"""simple docstring"""
return self.model(**lowerCAmelCase_)
def UpperCamelCase_ ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : List[Any]) -> Any:
"""simple docstring"""
_snake_case : Any = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
_snake_case : Any = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            ) # XLM and RoBERTa don't use token_type_ids
_snake_case : Optional[int] = self(**lowerCAmelCase_)
_snake_case : Tuple = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def UpperCamelCase_ ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
_snake_case : Optional[int] = self.hparams
for mode in ["train", "dev", "test"]:
_snake_case : Tuple = self._feature_file(lowerCAmelCase_)
if os.path.exists(lowerCAmelCase_) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , lowerCAmelCase_)
_snake_case : int = torch.load(lowerCAmelCase_)
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir)
_snake_case : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , lowerCAmelCase_)
_snake_case : List[str] = self.token_classification_task.convert_examples_to_features(
lowerCAmelCase_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""]) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=lowerCAmelCase_ , pad_on_left=bool(self.config.model_type in ["""xlnet"""]) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("""Saving features into cached file %s""" , lowerCAmelCase_)
torch.save(lowerCAmelCase_ , lowerCAmelCase_)
def UpperCamelCase_ ( self : List[str] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int] = False) -> Optional[int]:
"""simple docstring"""
_snake_case : int = self._feature_file(lowerCAmelCase_)
logger.info("""Loading features from cached file %s""" , lowerCAmelCase_)
_snake_case : Union[str, Any] = torch.load(lowerCAmelCase_)
_snake_case : Optional[int] = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
_snake_case : int = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
if features[0].token_type_ids is not None:
_snake_case : Any = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
else:
_snake_case : Union[str, Any] = torch.tensor([0 for f in features] , dtype=torch.long)
# HACK(we will not use this anymore soon)
_snake_case : Union[str, Any] = torch.tensor([f.label_ids for f in features] , dtype=torch.long)
return DataLoader(
TensorDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) , batch_size=lowerCAmelCase_)
def UpperCamelCase_ ( self : Any , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple) -> Tuple:
"""simple docstring"""
"""Compute validation""" ""
_snake_case : Optional[int] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
_snake_case : List[Any] = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            ) # XLM and RoBERTa don't use token_type_ids
_snake_case : Dict = self(**lowerCAmelCase_)
_snake_case , _snake_case : int = outputs[:2]
_snake_case : List[str] = logits.detach().cpu().numpy()
_snake_case : int = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCamelCase_ ( self : List[Any] , lowerCAmelCase : int) -> Optional[int]:
"""simple docstring"""
_snake_case : List[Any] = torch.stack([x["""val_loss"""] for x in outputs]).mean()
_snake_case : Tuple = np.concatenate([x["""pred"""] for x in outputs] , axis=0)
_snake_case : Dict = np.argmax(lowerCAmelCase_ , axis=2)
_snake_case : List[Any] = np.concatenate([x["""target"""] for x in outputs] , axis=0)
_snake_case : Dict = dict(enumerate(self.labels))
_snake_case : int = [[] for _ in range(out_label_ids.shape[0])]
_snake_case : List[str] = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
_snake_case : Dict = {
"""val_loss""": val_loss_mean,
"""accuracy_score""": accuracy_score(lowerCAmelCase_ , lowerCAmelCase_),
"""precision""": precision_score(lowerCAmelCase_ , lowerCAmelCase_),
"""recall""": recall_score(lowerCAmelCase_ , lowerCAmelCase_),
"""f1""": fa_score(lowerCAmelCase_ , lowerCAmelCase_),
}
_snake_case : Union[str, Any] = dict(results.items())
_snake_case : Optional[Any] = results
return ret, preds_list, out_label_list
def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : Union[str, Any]) -> str:
"""simple docstring"""
_snake_case , _snake_case , _snake_case : List[Any] = self._eval_end(lowerCAmelCase_)
_snake_case : int = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase_ ( self : Union[str, Any] , lowerCAmelCase : Dict) -> int:
"""simple docstring"""
_snake_case , _snake_case , _snake_case : int = self._eval_end(lowerCAmelCase_)
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
_snake_case : int = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCamelCase_ ( lowerCAmelCase : int , lowerCAmelCase : str) -> int:
"""simple docstring"""
BaseTransformer.add_model_specific_args(lowerCAmelCase_ , lowerCAmelCase_)
parser.add_argument(
"""--task_type""" , default="""NER""" , type=lowerCAmelCase_ , help="""Task type to fine tune in training (e.g. NER, POS, etc)""")
parser.add_argument(
"""--max_seq_length""" , default=128 , type=lowerCAmelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--labels""" , default="""""" , type=lowerCAmelCase_ , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=lowerCAmelCase_ , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""")
return parser
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
a__ = NERTransformer.add_model_specific_args(parser, os.getcwd())
a__ = parser.parse_args()
a__ = NERTransformer(args)
a__ = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
a__ = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
a__ = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel('Gain (dB)')
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel('Phase shift (Radians)')
    plt.plot(np.unwrap(fft_phase, -2 * pi))
    plt.show()
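# A minimal runnable sketch (hypothetical filter, not part of the original
# module): a first-order IIR low-pass, y[n] = a*x[n] + (1 - a)*y[n-1],
# satisfies the FilterType protocol, so both plots work on it. The padded
# zeros produce divide-by-zero warnings inside log10; that matches the
# behaviour of the plotting code above.
class SimpleLowPass:
    def __init__(self, alpha: float = 0.1) -> None:
        self.alpha = alpha
        self.prev = 0.0

    def process(self, sample: float) -> float:
        self.prev = self.alpha * sample + (1 - self.alpha) * self.prev
        return self.prev


if __name__ == "__main__":
    show_frequency_response(SimpleLowPass(), 48000)
    show_phase_response(SimpleLowPass(), 48000)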
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def snake_case( __magic_name__ , __magic_name__=False ) -> int:
'''simple docstring'''
lowercase : List[str] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowercase : Optional[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def snake_case( __magic_name__ , __magic_name__ , __magic_name__=False ) -> Union[str, Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowercase : Dict = ''''''
else:
lowercase : List[Any] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase : str = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
lowercase : Tuple = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase : Optional[Any] = in_proj_weight[
: config.hidden_size, :
]
lowercase : Union[str, Any] = in_proj_bias[: config.hidden_size]
lowercase : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowercase : Optional[Any] = in_proj_bias[-config.hidden_size :]
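# Illustrative sketch of the slicing above (hypothetical toy tensor): timm
# stores Q, K and V as one fused (3 * hidden, hidden) matrix, so the three
# slices are simply consecutive row blocks of size `hidden`:
#
#   import torch
#   hidden = 4
#   qkv = torch.arange(3 * hidden * hidden).reshape(3 * hidden, hidden)
#   q, k, v = qkv[:hidden], qkv[hidden : 2 * hidden], qkv[-hidden:]
#   assert torch.equal(torch.cat([q, k, v], dim=0), qkv)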
def remove_classification_head_(state_dict):
    """Remove the classification head weights from a DINO state dict."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Pop `old` from the dict and re-insert its value under `new`."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Fetch the standard COCO image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def snake_case( __magic_name__ , __magic_name__ , __magic_name__=True ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[int] = ViTConfig()
# patch_size
if model_name[-1] == "8":
lowercase : List[str] = 8
# set labels if required
if not base_model:
lowercase : Tuple = 10_00
lowercase : List[str] = '''huggingface/label-files'''
lowercase : Optional[int] = '''imagenet-1k-id2label.json'''
lowercase : Optional[Any] = json.load(open(hf_hub_download(__A , __A , repo_type='''dataset''' ) , '''r''' ) )
        lowercase : Any = {int(k): v for k, v in idalabel.items()}
lowercase : Optional[Any] = idalabel
lowercase : Any = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
lowercase : List[str] = 3_84
lowercase : List[Any] = 15_36
lowercase : Dict = 12
lowercase : Union[str, Any] = 6
# load original model from torch hub
lowercase : Any = torch.hub.load('''facebookresearch/dino:main''' , __A )
original_model.eval()
# load state_dict of original model, remove and rename some keys
lowercase : Any = original_model.state_dict()
if base_model:
remove_classification_head_(__A )
lowercase : List[Any] = create_rename_keys(__A , base_model=__A )
for src, dest in rename_keys:
rename_key(__A , __A , __A )
read_in_q_k_v(__A , __A , __A )
# load HuggingFace model
if base_model:
lowercase : List[str] = ViTModel(__A , add_pooling_layer=__A ).eval()
else:
lowercase : str = ViTForImageClassification(__A ).eval()
model.load_state_dict(__A )
# Check outputs on an image, prepared by ViTImageProcessor
lowercase : Union[str, Any] = ViTImageProcessor()
lowercase : Any = image_processor(images=prepare_img() , return_tensors='''pt''' )
lowercase : Union[str, Any] = encoding['''pixel_values''']
lowercase : int = model(__A )
if base_model:
lowercase : str = original_model(__A )
assert torch.allclose(__A , outputs.last_hidden_state[:, 0, :] , atol=1e-1 )
else:
lowercase : Tuple = original_model(__A )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__A , outputs.logits , atol=1e-3 )
Path(__A ).mkdir(exist_ok=__A )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__A )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
lowerCAmelCase_ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class __UpperCAmelCase ( tf.keras.layers.Layer ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1 , lowerCAmelCase_=False , **lowerCAmelCase_ ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_snake_case = vocab_size
_snake_case = d_embed
_snake_case = d_proj
_snake_case = cutoffs + [vocab_size]
_snake_case = [0] + self.cutoffs
_snake_case = div_val
_snake_case = self.cutoffs[0]
_snake_case = len(self.cutoffs ) - 1
_snake_case = self.shortlist_size + self.n_clusters
_snake_case = keep_order
_snake_case = []
_snake_case = []
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
if self.n_clusters > 0:
_snake_case = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='zeros' , trainable=lowerCAmelCase_ , name='cluster_weight' )
_snake_case = self.add_weight(
shape=(self.n_clusters,) , initializer='zeros' , trainable=lowerCAmelCase_ , name='cluster_bias' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
_snake_case = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_projs_._{i}' , )
self.out_projs.append(lowerCAmelCase_ )
else:
self.out_projs.append(lowerCAmelCase_ )
_snake_case = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_layers_._{i}_._weight' , )
_snake_case = self.add_weight(
shape=(self.vocab_size,) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_layers_._{i}_._bias' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
_snake_case , _snake_case = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_snake_case = self.d_embed // (self.div_val**i)
_snake_case = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_projs_._{i}' )
self.out_projs.append(lowerCAmelCase_ )
_snake_case = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_layers_._{i}_._weight' , )
_snake_case = self.add_weight(
shape=(r_idx - l_idx,) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_layers_._{i}_._bias' , )
self.out_layers.append((weight, bias) )
super().build(lowerCAmelCase_ )
@staticmethod
def lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ):
"""simple docstring"""
_snake_case = x
if proj is not None:
_snake_case = tf.einsum('ibd,ed->ibe' , lowerCAmelCase_ , lowerCAmelCase_ )
return tf.einsum('ibd,nd->ibn' , lowerCAmelCase_ , lowerCAmelCase_ ) + b
@staticmethod
def lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = shape_list(lowerCAmelCase_ )
_snake_case = tf.range(lp_size[0] , dtype=target.dtype )
_snake_case = tf.stack([r, target] , 1 )
return tf.gather_nd(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=False ):
"""simple docstring"""
_snake_case = 0
if self.n_clusters == 0:
_snake_case = self._logit(lowerCAmelCase_ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
_snake_case = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=lowerCAmelCase_ , logits=lowerCAmelCase_ )
_snake_case = tf.nn.log_softmax(lowerCAmelCase_ , axis=-1 )
else:
_snake_case = shape_list(lowerCAmelCase_ )
_snake_case = []
_snake_case = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
_snake_case , _snake_case = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
_snake_case = (target >= l_idx) & (target < r_idx)
_snake_case = tf.where(lowerCAmelCase_ )
_snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ ) - l_idx
if self.div_val == 1:
_snake_case = self.out_layers[0][0][l_idx:r_idx]
_snake_case = self.out_layers[0][1][l_idx:r_idx]
else:
_snake_case = self.out_layers[i][0]
_snake_case = self.out_layers[i][1]
if i == 0:
_snake_case = tf.concat([cur_W, self.cluster_weight] , 0 )
_snake_case = tf.concat([cur_b, self.cluster_bias] , 0 )
_snake_case = self._logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.out_projs[0] )
_snake_case = tf.nn.log_softmax(lowerCAmelCase_ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
_snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self._gather_logprob(lowerCAmelCase_ , lowerCAmelCase_ )
else:
_snake_case = self._logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.out_projs[i] )
_snake_case = tf.nn.log_softmax(lowerCAmelCase_ )
_snake_case = self.cutoffs[0] + i - 1 # No probability for the head cluster
_snake_case = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(lowerCAmelCase_ )
if target is not None:
_snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self._gather_logprob(lowerCAmelCase_ , lowerCAmelCase_ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(lowerCAmelCase_ , -cur_logprob , shape_list(lowerCAmelCase_ ) )
_snake_case = tf.concat(lowerCAmelCase_ , axis=-1 )
if target is not None:
if return_mean:
_snake_case = tf.reduce_mean(lowerCAmelCase_ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(lowerCAmelCase_ )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(lowerCAmelCase_ , name=self.name , aggregation='mean' if return_mean else '' )
return out
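# Worked example of the adaptive-softmax layout above (illustrative numbers):
# with vocab_size=32000 and cutoffs=[2000, 10000], cutoffs becomes
# [2000, 10000, 32000] and cutoff_ends [0, 2000, 10000, 32000]. The head
# softmax covers the 2000 most frequent tokens plus n_clusters=2 cluster
# logits; the tails cover [2000, 10000) and [10000, 32000), and a tail
# token's log-prob is its cluster's head log-prob plus its log-prob within
# the tail softmax.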
"""simple docstring"""
from copy import deepcopy
class __magic_name__ :
'''simple docstring'''
def __init__( self , _a = None , _a = None ):
"""simple docstring"""
if arr is None and size is not None:
lowerCamelCase = size
lowerCamelCase = [0] * size
elif arr is not None:
self.init(lowerCAmelCase_ )
else:
raise ValueError("""Either arr or size must be specified""" )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
lowerCamelCase = len(lowerCAmelCase_ )
lowerCamelCase = deepcopy(lowerCAmelCase_ )
for i in range(1 , self.size ):
lowerCamelCase = self.next_(lowerCAmelCase_ )
if j < self.size:
self.tree[j] += self.tree[i]
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
lowerCamelCase = self.next_(lowerCAmelCase_ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def _lowerCAmelCase ( _a ):
"""simple docstring"""
return index + (index & (-index))
@staticmethod
def _lowerCAmelCase ( _a ):
"""simple docstring"""
return index - (index & (-index))
def _lowerCAmelCase ( self , _a , _a ):
"""simple docstring"""
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
lowerCamelCase = self.next_(lowerCAmelCase_ )
def _lowerCAmelCase ( self , _a , _a ):
"""simple docstring"""
self.add(lowerCAmelCase_ , value - self.get(lowerCAmelCase_ ) )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
if right == 0:
return 0
lowerCamelCase = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
lowerCamelCase = self.prev(lowerCAmelCase_ )
return result
def _lowerCAmelCase ( self , _a , _a ):
"""simple docstring"""
return self.prefix(lowerCAmelCase_ ) - self.prefix(lowerCAmelCase_ )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
return self.query(lowerCAmelCase_ , index + 1 )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
value -= self.tree[0]
if value < 0:
return -1
lowerCamelCase = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
lowerCamelCase = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
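# Worked example of the low-bit arithmetic used by next_/prev_ above:
# for index = 12 = 0b1100, index & (-index) == 4 isolates the lowest set
# bit, so next_(12) == 16 (jump to the next node whose range covers 12 when
# updating) and prev_(12) == 8 (drop the lowest set bit when accumulating a
# prefix sum).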
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
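# Worked example (illustrative): with parents "ABCDEF" and "abcdef" and
# random_slice == 2, crossover returns ("ABcdef", "abCDEF") -- each child
# keeps one parent's prefix and the other parent's suffix.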
def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append(''.join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'\nGeneration: {generation}'
                f'\nTotal Population:{total_population}'
                f'\nBest score: {population_score[0][1]}'
                f'\nBest string: {population_score[0][0]}')

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
    )
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders ( accelerator , batch_size = 16 ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    datasets = load_dataset("glue" , "mrpc" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="longest" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="pt" , )
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
return train_dataloader, eval_dataloader
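# Illustrative usage sketch (hedged): in this script the Accelerator is built inside
# training_function below; a minimal standalone call would look like
#
#   accelerator = Accelerator()
#   train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size=16)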
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function ( config , args ):
'''simple docstring'''
if os.environ.get("TESTING_MOCKED_DATALOADERS" , __A ) == "1":
UpperCAmelCase : str = 2
# Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase : List[str] = config["lr"]
UpperCAmelCase : Any = int(config["num_epochs"] )
UpperCAmelCase : Dict = int(config["seed"] )
UpperCAmelCase : List[str] = int(config["batch_size"] )
UpperCAmelCase : List[str] = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
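        # Worked example (illustrative): with config batch_size=64 and MAX_GPU_BATCH_SIZE=16,
        # gradient_accumulation_steps becomes 64 // 16 = 4 and the per-step batch_size drops
        # to 16, so the effective batch size per optimizer step stays 16 * 4 = 64.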
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=True )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
# Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
# Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
# Now we train the model
    for epoch in range(num_epochs ):
model.train()
        for step, batch in enumerate(train_dataloader ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather((predictions, batch["labels"]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
UpperCAmelCase : List[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCAmelCase : List[Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , __A )
def lowercase ( ):
'''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=__A , default=__A , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 311
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase : Any = {
"configuration_chinese_clip": [
"CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ChineseCLIPConfig",
"ChineseCLIPOnnxConfig",
"ChineseCLIPTextConfig",
"ChineseCLIPVisionConfig",
],
"processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[Any] = ["ChineseCLIPFeatureExtractor"]
lowercase : List[Any] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Any = [
"CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ChineseCLIPModel",
"ChineseCLIPPreTrainedModel",
"ChineseCLIPTextModel",
"ChineseCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
lowercase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42
| 0
|
"""simple docstring"""
import os
lowerCAmelCase__ = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
def parse_roman_numerals ( numerals : str ) -> int:
'''simple docstring'''
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
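# Worked example (illustrative, not from the original file): parsing "XIX" sees X(10)
# before I(1) and adds 10, then the subtractive pair I(1) before X(10) subtracts 1,
# and the final X adds 10, giving 10 - 1 + 10 = 19.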
def generate_roman_numerals ( num : int ) -> str:
'''simple docstring'''
lowerCAmelCase : str = ""
lowerCAmelCase : str = num // 1_0_0_0
numerals += m_count * "M"
num %= 1_0_0_0
    c_count = num // 1_0_0
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_0_0
    x_count = num // 1_0
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 1_0
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
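# Worked example (illustrative, not from the original file):
# 1994 -> "M" (1000) + "CM" (900) + "XC" (90) + "IV" (4) = "MCMXCIV".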
def a__ ( SCREAMING_SNAKE_CASE : Tuple = "/p089_roman.txt" ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = 0
with open(os.path.dirname(__A ) + roman_numerals_filename ) as filea:
lowerCAmelCase : Any = filea.readlines()
for line in lines:
lowerCAmelCase : Optional[int] = line.strip()
lowerCAmelCase : Tuple = parse_roman_numerals(__A )
lowerCAmelCase : Any = generate_roman_numerals(__A )
savings += len(__A ) - len(__A )
return savings
if __name__ == "__main__":
print(F"{solution() = }")
| 108
|
'''simple docstring'''
def count_divisors ( n : int ) -> int:
    n_divisors = 1
    i = 2
while i * i <= n:
        multiplicity = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
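# Worked example (illustrative): 28 = 2**2 * 7, so the loop records multiplicities 2 and 1
# and count_divisors(28) returns (2 + 1) * (1 + 1) = 6, i.e. the divisors 1, 2, 4, 7, 14, 28.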
def solution ( ) -> int:
    t_num = 1
    i = 1
while True:
i += 1
t_num += i
        if count_divisors(t_num ) > 500:
break
return t_num
if __name__ == "__main__":
print(solution())
| 42
| 0
|
"""simple docstring"""
from math import pow
def backtrack ( needed_sum : int , power : int , current_number : int , current_sum : int , solutions_count : int , ) -> tuple[int, int]:
'''simple docstring'''
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
    i_to_n = int(pow(current_number , power ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
        current_sum , solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
        current_sum , solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
return current_sum, solutions_count
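# Worked example (illustrative): with needed_sum=100 and power=2 the three decompositions
# into distinct squares are 10**2, 6**2 + 8**2, and 1**2 + 3**2 + 4**2 + 5**2 + 7**2,
# so backtrack(100, 2, 1, 0, 0)[1] should evaluate to 3.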
def solve ( needed_sum : int , power : int ) -> int:
'''simple docstring'''
if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10):
raise ValueError(
'Invalid input\n'
'needed_sum must be between 1 and 1000, power between 2 and 10.' )
    return backtrack(needed_sum , power , 1 , 0 , 0 )[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 315
|
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __UpperCAmelCase ( _lowerCamelCase ):
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_snake_case = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_snake_case = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_snake_case = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(lowerCAmelCase_ )
BertModel.from_pretrained(lowerCAmelCase_ )
BertTokenizer.from_pretrained(lowerCAmelCase_ )
pipeline(task='fill-mask' , model=lowerCAmelCase_ )
# baseline - just load from_pretrained with normal network
_snake_case = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_snake_case = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_snake_case = '1'
_snake_case = subprocess.run(lowerCAmelCase_ , env=lowerCAmelCase_ , check=lowerCAmelCase_ , capture_output=lowerCAmelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_snake_case = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_snake_case = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_snake_case = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(lowerCAmelCase_ )
BertModel.from_pretrained(lowerCAmelCase_ )
BertTokenizer.from_pretrained(lowerCAmelCase_ )
pipeline(task='fill-mask' , model=lowerCAmelCase_ )
# baseline - just load from_pretrained with normal network
_snake_case = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_snake_case = self.get_env()
_snake_case = subprocess.run(lowerCAmelCase_ , env=lowerCAmelCase_ , check=lowerCAmelCase_ , capture_output=lowerCAmelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
_snake_case = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
_snake_case = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
_snake_case = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_snake_case = self.get_env()
_snake_case = subprocess.run(lowerCAmelCase_ , env=lowerCAmelCase_ , check=lowerCAmelCase_ , capture_output=lowerCAmelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# next emulate no network
_snake_case = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_snake_case = '1'
_snake_case = subprocess.run(lowerCAmelCase_ , env=lowerCAmelCase_ , check=lowerCAmelCase_ , capture_output=lowerCAmelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = '\nfrom transformers import pipeline\n '
_snake_case = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
_snake_case = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
_snake_case = self.get_env()
_snake_case = '1'
_snake_case = [sys.executable, '-c', '\n'.join([load, mock, run] )]
_snake_case = subprocess.run(lowerCAmelCase_ , env=lowerCAmelCase_ , check=lowerCAmelCase_ , capture_output=lowerCAmelCase_ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = '\nfrom transformers import AutoModel\n '
_snake_case = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
_snake_case = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_snake_case = self.get_env()
_snake_case = subprocess.run(lowerCAmelCase_ , env=lowerCAmelCase_ , check=lowerCAmelCase_ , capture_output=lowerCAmelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_snake_case = '1'
_snake_case = subprocess.run(lowerCAmelCase_ , env=lowerCAmelCase_ , check=lowerCAmelCase_ , capture_output=lowerCAmelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
| 42
| 0
|
import re
def _A ( dna : str ) -> str:
    """simple docstring"""
    if len(re.findall('[ATCG]' , dna ) ) != len(dna ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
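# Worked example (illustrative): each base pairs A<->T and C<->G, so _A("ATCG") returns
# "TAGC", while any character outside "ATCG" raises ValueError('Invalid Strand').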
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
        model = AutoModelForSeq2SeqLM.from_pretrained('google/mt5-small' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small' )
        input_ids = tokenizer('Hello there' , return_tensors='pt' ).input_ids
        labels = tokenizer('Hi I am' , return_tensors='pt' ).input_ids
        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 42
| 0
|
import datasets
from .evaluate import evaluate
__UpperCamelCase : List[str] = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
__UpperCamelCase : List[Any] = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
__UpperCamelCase : Tuple = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowercase__ ( datasets.Metric):
    def _info ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {'''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.Value('''string''' )},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , )
    def _compute ( self , predictions , references ):
'''simple docstring'''
        pred_dict = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
        dataset = [
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset , predictions=pred_dict )
return score
| 182
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase : List[str] = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[int] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Tuple = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
lowercase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42
| 0
|
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : str = logging.get_logger(__name__)
def get_maskformer_config ( model_name ):
    backbone_config = SwinConfig.from_pretrained(
        'microsoft/swin-tiny-patch4-window7-224' , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
    config = MaskFormerConfig(backbone_config=backbone_config )
    repo_id = 'huggingface/label-files'
if "ade20k-full" in model_name:
# this should be ok
        config.num_labels = 8_4_7
        filename = 'maskformer-ade20k-full-id2label.json'
elif "ade" in model_name:
# this should be ok
        config.num_labels = 1_5_0
        filename = 'ade20k-id2label.json'
elif "coco-stuff" in model_name:
# this should be ok
        config.num_labels = 1_7_1
        filename = 'maskformer-coco-stuff-id2label.json'
elif "coco" in model_name:
# TODO
        config.num_labels = 1_3_3
        filename = 'coco-panoptic-id2label.json'
elif "cityscapes" in model_name:
# this should be ok
        config.num_labels = 1_9
        filename = 'cityscapes-id2label.json'
elif "vistas" in model_name:
# this should be ok
        config.num_labels = 6_5
        filename = 'mapillary-vistas-id2label.json'
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    idalabel = {int(k): v for k, v in idalabel.items()}
return config
def create_rename_keys ( config ):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm1.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm1.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.proj.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.proj.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm2.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm2.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.layers.{i}.downsample.reduction.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.layers.{i}.downsample.norm.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.layers.{i}.downsample.norm.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'sem_seg_head.adapter_{source_index}.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((f'sem_seg_head.adapter_{source_index}.norm.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((f'sem_seg_head.adapter_{source_index}.norm.bias', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((f'sem_seg_head.layer_{source_index}.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((f'sem_seg_head.layer_{source_index}.norm.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((f'sem_seg_head.layer_{source_index}.norm.bias', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', f'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', f'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', f'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', f'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', f'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', f'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', f'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', f'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', f'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', f'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', f'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', f'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', f'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', f'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((f'sem_seg_head.predictor.mask_embed.layers.{i}.weight', f'mask_embedder.{i}.0.weight') )
rename_keys.append((f'sem_seg_head.predictor.mask_embed.layers.{i}.bias', f'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def rename_key ( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v ( state_dict , backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
            in_proj_bias = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim :, :]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim :]
# fmt: on
def read_in_decoder_q_k_v ( state_dict , config ):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'] = in_proj_weight[: hidden_size, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size :, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'] = in_proj_weight[: hidden_size, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'] = in_proj_weight[-hidden_size :, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'] = in_proj_bias[-hidden_size :]
# fmt: on
def prepare_img ( ):
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint ( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ):
    config = get_maskformer_config(model_name )
# load original state_dict
    with open(checkpoint_path , 'rb' ) as f:
        data = pickle.load(f )
    state_dict = data['model']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
# update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
# load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
model.eval()
for name, param in model.named_parameters():
        print(name , param.shape )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
    assert len(unexpected_keys ) == 0, f'Unexpected keys: {unexpected_keys}'
# verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 6_5
    elif "cityscapes" in model_name:
        ignore_index = 6_5_5_3_5
    else:
        ignore_index = 2_5_5
    reduce_labels = True if 'ade' in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index , reduce_labels=reduce_labels )
    inputs = image_processor(image , return_tensors='pt' )
    outputs = model(**inputs )
print('Logits:' , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6_353, -4.4_770, -2.6_065], [0.5_081, -4.2_394, -3.5_343], [2.1_909, -5.0_353, -1.9_323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f'Saving model and image processor to {pytorch_dump_folder_path}' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print('Pushing model and image processor to the hub...' )
model.push_to_hub(f'nielsr/{model_name}' )
image_processor.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
snake_case_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
        help='Name of the MaskFormer model you\'d like to convert',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
snake_case_ : str = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 83
|
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS : str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS : list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS : set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key ( ciphertext : list[int] , key : tuple[int, ...] ) -> str | None:
    decoded = ""
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
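# Worked example (illustrative): XOR is its own inverse, so chr((ord("H") ^ 42) ^ 42) == "H";
# try_key bails out with None as soon as a decoded byte is not in VALID_INTS, which prunes
# most of the 26**3 candidate keys cheaply.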
def filter_valid_chars ( ciphertext : list[int] ) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        encoded = try_key(ciphertext , key )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def filter_common_word ( possibles : list[str] , common_word : str ) -> list[str]:
return [possible for possible in possibles if common_word in possible.lower()]
def SCREAMING_SNAKE_CASE__ ( __A = "p059_cipher.txt" ) -> int:
_snake_case = 42
_snake_case = 42
_snake_case = 42
_snake_case = 42
_snake_case = Path(__A ).parent.joinpath(__A ).read_text(encoding='utf-8' )
_snake_case = [int(__A ) for number in data.strip().split(',' )]
_snake_case = filter_valid_chars(__A )
for common_word in COMMON_WORDS:
_snake_case = filter_common_word(__A , __A )
if len(__A ) == 1:
break
_snake_case = possibles[0]
return sum(ord(__A ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 42
| 0
|
"""simple docstring"""
import os
def _a ( _SCREAMING_SNAKE_CASE = "input.txt" ) -> int:
with open(os.path.join(os.path.dirname(__A ) , __A ) ) as input_file:
snake_case_ = [
[int(__A ) for element in line.split(""",""" )]
for line in input_file.readlines()
]
snake_case_ = len(__A )
snake_case_ = len(matrix[0] )
snake_case_ = [[-1 for _ in range(__A )] for _ in range(__A )]
for i in range(__A ):
snake_case_ = matrix[i][0]
for j in range(1 , __A ):
for i in range(__A ):
snake_case_ = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __A ):
snake_case_ = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
snake_case_ = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
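# Worked example (illustrative): for the matrix [[1, 9, 1], [9, 1, 9], [1, 1, 1]] the cheapest
# left-column-to-right-column path stays on the bottom row, 1 + 1 + 1 = 3, and the three
# relaxation passes per column (right, from above, from below) reproduce exactly that value.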
if __name__ == "__main__":
print(f"""{solution() = }""")
| 347
|
'''simple docstring'''
def solution ( limit = 1_000_000 ) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1 , limit ):
        for n in range(first_term , limit , first_term ):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z>0 and a>d ,also 4d<a
    count = sum(1 for x in frequency[1:limit] if x == 10 )
return count
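# Context (per the Project Euler #135 statement, not computed here): n = 1155 is the least
# value for which x**2 - y**2 - z**2 = n has exactly ten solutions; this function counts the
# values of n below the limit that reach exactly ten.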
if __name__ == "__main__":
print(F'''{solution() = }''')
| 42
| 0
|
import tensorflow as tf
from ...tf_utils import shape_list
class snake_case_ ( tf.keras.layers.Layer ):
def __init__( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : Dict=1 , lowercase_ : Any=False , **lowercase_ : Dict ) -> Optional[int]:
super().__init__(**lowerCAmelCase_ )
lowercase__ : Optional[Any] = vocab_size
lowercase__ : int = d_embed
lowercase__ : Union[str, Any] = d_proj
lowercase__ : Optional[Any] = cutoffs + [vocab_size]
lowercase__ : Optional[Any] = [0] + self.cutoffs
lowercase__ : int = div_val
lowercase__ : str = self.cutoffs[0]
lowercase__ : List[Any] = len(self.cutoffs ) - 1
lowercase__ : List[Any] = self.shortlist_size + self.n_clusters
lowercase__ : Any = keep_order
lowercase__ : Dict = []
lowercase__ : int = []
def __UpperCamelCase ( self : Tuple , lowercase_ : int ) -> Union[str, Any]:
if self.n_clusters > 0:
lowercase__ : Any = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="zeros" , trainable=lowerCAmelCase_ , name="cluster_weight" )
lowercase__ : Any = self.add_weight(
shape=(self.n_clusters,) , initializer="zeros" , trainable=lowerCAmelCase_ , name="cluster_bias" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
lowercase__ : Optional[int] = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="zeros" , trainable=lowerCAmelCase_ , name=F'''out_projs_._{i}''' , )
self.out_projs.append(lowerCAmelCase_ )
else:
self.out_projs.append(lowerCAmelCase_ )
lowercase__ : Optional[int] = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="zeros" , trainable=lowerCAmelCase_ , name=F'''out_layers_._{i}_._weight''' , )
lowercase__ : List[str] = self.add_weight(
shape=(self.vocab_size,) , initializer="zeros" , trainable=lowerCAmelCase_ , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
lowercase__ , lowercase__ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase__ : List[Any] = self.d_embed // (self.div_val**i)
lowercase__ : Any = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="zeros" , trainable=lowerCAmelCase_ , name=F'''out_projs_._{i}''' )
self.out_projs.append(lowerCAmelCase_ )
lowercase__ : Tuple = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="zeros" , trainable=lowerCAmelCase_ , name=F'''out_layers_._{i}_._weight''' , )
lowercase__ : List[str] = self.add_weight(
shape=(r_idx - l_idx,) , initializer="zeros" , trainable=lowerCAmelCase_ , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(lowerCAmelCase_ )
@staticmethod
def __UpperCamelCase ( lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Tuple=None ) -> Any:
lowercase__ : Any = x
if proj is not None:
lowercase__ : str = tf.einsum("ibd,ed->ibe" , lowerCAmelCase_ , lowerCAmelCase_ )
return tf.einsum("ibd,nd->ibn" , lowerCAmelCase_ , lowerCAmelCase_ ) + b
@staticmethod
def __UpperCamelCase ( lowercase_ : List[str] , lowercase_ : Any ) -> Optional[int]:
lowercase__ : Tuple = shape_list(lowerCAmelCase_ )
lowercase__ : str = tf.range(lp_size[0] , dtype=target.dtype )
lowercase__ : str = tf.stack([r, target] , 1 )
return tf.gather_nd(lowerCAmelCase_ , lowerCAmelCase_ )
def __UpperCamelCase ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : List[str]=True , lowercase_ : Union[str, Any]=False ) -> Union[str, Any]:
lowercase__ : List[str] = 0
if self.n_clusters == 0:
lowercase__ : Any = self._logit(lowerCAmelCase_ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
lowercase__ : int = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=lowerCAmelCase_ , logits=lowerCAmelCase_ )
lowercase__ : Optional[Any] = tf.nn.log_softmax(lowerCAmelCase_ , axis=-1 )
else:
lowercase__ : Union[str, Any] = shape_list(lowerCAmelCase_ )
lowercase__ : Dict = []
lowercase__ : Tuple = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
lowercase__ , lowercase__ : Union[str, Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
lowercase__ : Optional[int] = (target >= l_idx) & (target < r_idx)
lowercase__ : Tuple = tf.where(lowerCAmelCase_ )
lowercase__ : Union[str, Any] = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ ) - l_idx
if self.div_val == 1:
lowercase__ : Optional[Any] = self.out_layers[0][0][l_idx:r_idx]
lowercase__ : List[str] = self.out_layers[0][1][l_idx:r_idx]
else:
lowercase__ : Any = self.out_layers[i][0]
lowercase__ : int = self.out_layers[i][1]
if i == 0:
lowercase__ : Union[str, Any] = tf.concat([cur_W, self.cluster_weight] , 0 )
lowercase__ : Optional[Any] = tf.concat([cur_b, self.cluster_bias] , 0 )
lowercase__ : Any = self._logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.out_projs[0] )
lowercase__ : Optional[Any] = tf.nn.log_softmax(lowerCAmelCase_ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
lowercase__ : List[str] = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
lowercase__ : Any = self._gather_logprob(lowerCAmelCase_ , lowerCAmelCase_ )
else:
lowercase__ : List[Any] = self._logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.out_projs[i] )
lowercase__ : str = tf.nn.log_softmax(lowerCAmelCase_ )
lowercase__ : Dict = self.cutoffs[0] + i - 1 # No probability for the head cluster
lowercase__ : int = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(lowerCAmelCase_ )
if target is not None:
lowercase__ : Tuple = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
lowercase__ : Any = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
lowercase__ : Dict = self._gather_logprob(lowerCAmelCase_ , lowerCAmelCase_ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(lowerCAmelCase_ , -cur_logprob , shape_list(lowerCAmelCase_ ) )
lowercase__ : Optional[Any] = tf.concat(lowerCAmelCase_ , axis=-1 )
if target is not None:
if return_mean:
lowercase__ : Optional[int] = tf.reduce_mean(lowerCAmelCase_ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(lowerCAmelCase_ )
# Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
self.add_metric(lowerCAmelCase_ , name=self.name , aggregation="mean" if return_mean else "" )
return out
| 87
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase : Tuple = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : int = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Tuple = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42
| 0
|
def count_divisors ( n : int ) -> int:
    n_divisors = 1
    i = 2
while i * i <= n:
        multiplicity = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def solution ( ) -> int:
    t_num = 1
    i = 1
while True:
i += 1
t_num += i
        if count_divisors(t_num ) > 500:
break
return t_num
if __name__ == "__main__":
print(solution())
| 317
|
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution ( limit = 1_500_000 ) -> int:
    frequencies = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
            if gcd(euclid_m , euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
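# Context (per the Project Euler #75 statement): 120 is the smallest perimeter formed by
# exactly three distinct integer right triangles, namely (30, 40, 50), (20, 48, 52) and
# (24, 45, 51). This function instead counts perimeters formed by exactly one.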
if __name__ == "__main__":
print(F'''{solution() = }''')
| 42
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase_ = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "bert-base-uncased": 512,
    "bert-large-uncased": 512,
    "bert-base-cased": 512,
    "bert-large-cased": 512,
    "bert-base-multilingual-uncased": 512,
    "bert-base-multilingual-cased": 512,
    "bert-base-chinese": 512,
    "bert-base-german-cased": 512,
    "bert-large-uncased-whole-word-masking": 512,
    "bert-large-cased-whole-word-masking": 512,
    "bert-large-uncased-whole-word-masking-finetuned-squad": 512,
    "bert-large-cased-whole-word-masking-finetuned-squad": 512,
    "bert-base-cased-finetuned-mrpc": 512,
    "bert-base-german-dbmdz-cased": 512,
    "bert-base-german-dbmdz-uncased": 512,
    "TurkuNLP/bert-base-finnish-cased-v1": 512,
    "TurkuNLP/bert-base-finnish-uncased-v1": 512,
    "wietsedv/bert-base-dutch-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" BERT tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Builds model inputs from sequences by adding the [CLS] and [SEP] special tokens."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Creates the token type IDs (segment IDs) for a sequence pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
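# Hedged usage sketch (added; not part of the original module, and it needs
# network access to download the vocabulary the first time):
#
#     tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#     encoded = tokenizer("Hello world", "How are you?")
#     # encoded["token_type_ids"] is 0 for "[CLS] hello world [SEP]" and 1 for
#     # the second segment plus its trailing [SEP], matching
#     # create_token_type_ids_from_sequences above.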
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion')
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy'
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy'
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            'shi-labs/versatile-diffusion', torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy'
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts,
        tgt_texts=None,
        max_length=None,
        max_target_length=None,
        padding="longest",
        return_tensors=None,
        truncation=True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
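# Hedged usage sketch (added; the model name is a public RAG checkpoint, and
# `generated_ids` stands in for the output of a RAG model's generate() call):
#
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#     inputs = tokenizer("who holds the record in 100m freestyle?", return_tensors="pt")
#     answers = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)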
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum of the first n
    natural numbers and the sum of their squares."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
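# Worked example (added; this is the standard check for this problem):
# for n = 10 the square of the sum is 55**2 = 3025, the sum of the squares is
# 385, and the difference is 3025 - 385 = 2640.
assert solution(10) == 2640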
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group',
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key

                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "weight" in name:
                        weight_type = 'weight'
                    elif "bias" in name:
                        weight_type = 'bias'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = 'gelu'
    config.feat_extract_norm = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group'
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = 'Wav2Vec2FeatureExtractor'
    config.tokenizer_class = 'Wav2Vec2CTCTokenizer'

    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == 'layer' else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token='|',
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
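# Hedged invocation sketch (added; the script and file names below are
# placeholders, not paths from the original source):
#
#   python convert_sew_checkpoint.py \
#       --checkpoint_path ./sew_checkpoint.pt \
#       --pytorch_dump_folder_path ./sew-hf \
#       --dict_path ./dict.ltr.txt \
#       --is_finetuned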
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = 0
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
lowerCAmelCase : str = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowerCAmelCase_ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(lowerCAmelCase_ ) , 0 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
# Check that tokenizer_type ≠ model_type
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase_ , config=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(lowerCAmelCase_ , "vocab.txt" ) )
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(lowerCAmelCase_ , tokenizer_type="bert" , use_fast=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(lowerCAmelCase_ , "vocab.json" ) )
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(lowerCAmelCase_ , "merges.txt" ) )
lowerCAmelCase : Any = AutoTokenizer.from_pretrained(lowerCAmelCase_ , tokenizer_type="gpt2" , use_fast=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@require_tokenizers
def lowercase__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(lowerCAmelCase_ , "vocab.txt" ) )
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase_ , tokenizer_type="bert" )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(lowerCAmelCase_ , "vocab.json" ) )
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(lowerCAmelCase_ , "merges.txt" ) )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(lowerCAmelCase_ , tokenizer_type="gpt2" )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase__ ( self ):
"""simple docstring"""
with pytest.raises(lowerCAmelCase_ ):
AutoTokenizer.from_pretrained("./" , tokenizer_type="xxx" )
@require_tokenizers
def lowercase__ ( self ):
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
lowerCAmelCase : Optional[Any] = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased" )
self.assertIsInstance(lowerCAmelCase_ , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowerCAmelCase_ )
else:
self.assertEqual(tokenizer.do_lower_case , lowerCAmelCase_ )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def lowercase__ ( self ):
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowerCAmelCase_ , "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier" , ):
lowerCAmelCase : Dict = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists" )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = TOKENIZER_MAPPING.values()
lowerCAmelCase : Any = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowerCAmelCase_ )
@require_tokenizers
def lowercase__ ( self ):
"""simple docstring"""
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" ) , lowerCAmelCase_ )
@require_tokenizers
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained("distilbert-base-uncased" , do_lower_case=lowerCAmelCase_ )
lowerCAmelCase : List[Any] = "Hello, world. How are you?"
lowerCAmelCase : int = tokenizer.tokenize(lowerCAmelCase_ )
self.assertEqual("[UNK]" , tokens[0] )
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("microsoft/mpnet-base" , do_lower_case=lowerCAmelCase_ )
lowerCAmelCase : str = tokenizer.tokenize(lowerCAmelCase_ )
self.assertEqual("[UNK]" , tokens[0] )
@require_tokenizers
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config" )
self.assertEqual(type(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30_000 )
self.assertEqual(tokenizer.unk_token , "[UNK]" )
self.assertEqual(tokenizer.padding_side , "right" )
self.assertEqual(tokenizer.truncation_side , "right" )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase_ )
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("ctrl" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = get_tokenizer_config("bert-base-cased" )
lowerCAmelCase : Optional[int] = config.pop("_commit_hash" , lowerCAmelCase_ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowerCAmelCase_ , {"do_lower_case": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
lowerCAmelCase : int = get_tokenizer_config(lowerCAmelCase_ )
self.assertDictEqual(lowerCAmelCase_ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase_ )
lowerCAmelCase : Union[str, Any] = get_tokenizer_config(lowerCAmelCase_ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["tokenizer_class"] , "BertTokenizer" )
def lowercase__ ( self ):
"""simple docstring"""
try:
AutoConfig.register("custom" , lowerCAmelCase_ )
AutoTokenizer.register(lowerCAmelCase_ , slow_tokenizer_class=lowerCAmelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase_ ):
AutoTokenizer.register(lowerCAmelCase_ , slow_tokenizer_class=lowerCAmelCase_ )
lowerCAmelCase : Union[str, Any] = CustomTokenizer.from_pretrained(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase_ )
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowercase__ ( self ):
"""simple docstring"""
try:
AutoConfig.register("custom" , lowerCAmelCase_ )
# Can register in two steps
AutoTokenizer.register(lowerCAmelCase_ , slow_tokenizer_class=lowerCAmelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowerCAmelCase_ , fast_tokenizer_class=lowerCAmelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowerCAmelCase_ , slow_tokenizer_class=lowerCAmelCase_ , fast_tokenizer_class=lowerCAmelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase_ ):
AutoTokenizer.register(lowerCAmelCase_ , fast_tokenizer_class=lowerCAmelCase_ )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase : Dict = BertTokenizerFast.from_pretrained(lowerCAmelCase_ )
bert_tokenizer.save_pretrained(lowerCAmelCase_ )
lowerCAmelCase : Optional[Any] = CustomTokenizerFast.from_pretrained(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase_ )
lowerCAmelCase : Any = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(lowerCAmelCase_ , use_fast=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase__ ( self ):
"""simple docstring"""
with self.assertRaises(lowerCAmelCase_ ):
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCAmelCase_ ):
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=lowerCAmelCase_ )
lowerCAmelCase : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=lowerCAmelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase_ )
lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(lowerCAmelCase_ , trust_remote_code=lowerCAmelCase_ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=lowerCAmelCase_ , use_fast=lowerCAmelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase_ )
lowerCAmelCase : int = AutoTokenizer.from_pretrained(lowerCAmelCase_ , trust_remote_code=lowerCAmelCase_ , use_fast=lowerCAmelCase_ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
@require_tokenizers
def lowercase__ ( self ):
"""simple docstring"""
class SCREAMING_SNAKE_CASE__ ( _lowerCamelCase ):
"""simple docstring"""
a : Tuple =False
class SCREAMING_SNAKE_CASE__ ( _lowerCamelCase ):
"""simple docstring"""
a : List[str] =NewTokenizer
a : List[Any] =False
try:
AutoConfig.register("custom" , lowerCAmelCase_ )
AutoTokenizer.register(lowerCAmelCase_ , slow_tokenizer_class=lowerCAmelCase_ )
AutoTokenizer.register(lowerCAmelCase_ , fast_tokenizer_class=lowerCAmelCase_ )
# If remote code is not set, the default is to use local
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , use_fast=lowerCAmelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=lowerCAmelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=lowerCAmelCase_ , use_fast=lowerCAmelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
lowerCAmelCase : int = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=lowerCAmelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertTrue(tokenizer.special_attribute_present )
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=lowerCAmelCase_ , use_fast=lowerCAmelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=lowerCAmelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=lowerCAmelCase_ , use_fast=lowerCAmelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def lowercase__ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCAmelCase_ , "bert-base is not a local folder and is not a valid model identifier" ):
lowerCAmelCase : int = AutoTokenizer.from_pretrained("bert-base" )
def lowercase__ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCAmelCase_ , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(lowerCAmelCase_ , revision="aaaaaa" )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32_000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs XLNetConfig."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
                ' instead.',
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.')
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f'The model {self.model_type} is one of the few models that has no sequence length limit.'
        )
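# Hedged usage sketch (added; the values simply mirror the defaults above):
#
#     config = XLNetConfig(d_model=1024, n_head=16)
#     assert config.d_head == 64                    # derived as d_model // n_head
#     assert config.max_position_embeddings == -1   # XLNet has no hard length limit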
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def lowerCAmelCase_(self):  # noqa: kept structure; see test body
        pass

    @require_torch
    def test_offline_mode(self):
        # TRANSFORMERS_OFFLINE can only be changed before `transformers` is loaded,
        # so the actual check runs in a subprocess built from the segments below.
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode',
            result.stderr.decode().replace('\n', ''),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained('jplu/tf-camembert-base')

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)['last_hidden_state']
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler('sample_euler')

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type='np')

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler('sample_euler')

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type='np')

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler('sample_dpmpp_2m')

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type='np',
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
from __future__ import annotations
def solution(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create a solution object to save the path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
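    # Illustrative example (added sketch, not from the original file): 0 marks a
    # free cell and 1 a blocked cell; a path from (0, 0) to (3, 3) exists here.
    example_maze = [
        [0, 1, 0, 0],
        [0, 0, 0, 1],
        [1, 0, 1, 0],
        [1, 0, 0, 0],
    ]
    solve_maze(example_maze)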
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging

logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    # Attribute names below follow the SpeechT5HifiGan module layout
    # (conv_pre, upsampler, resblocks, conv_post).
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f'upsamples.{i}.1.weight_g']
        hf_model.upsampler[i].weight_v.data = checkpoint[f'upsamples.{i}.1.weight_v']
        hf_model.upsampler[i].bias.data = checkpoint[f'upsamples.{i}.1.bias']

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_g']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_v']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f'blocks.{i}.convs1.{j}.1.bias']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_g']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_v']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f'blocks.{i}.convs2.{j}.1.bias']

    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint['model']['generator'], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print('Pushing to the hub...')
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
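# Illustrative invocation (file names are placeholders, not from this script):
#   python convert_hifigan_checkpoint.py \
#       --checkpoint_path generator.ckpt \
#       --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan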
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDMaDPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDMaDPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='scaled_linear',
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=6,
            out_channels=6,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_ldm3d_ddim(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262])
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs['prompt'] = 3 * [inputs['prompt']]
        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop('prompt')]
        text_inputs = ldmad_pipe.tokenizer(
            prompt,
            padding='max_length',
            max_length=ldmad_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors='pt',
        )
        text_inputs = text_inputs['input_ids'].to(torch_device)
        prompt_embeds = ldmad_pipe.text_encoder(text_inputs)[0]
        inputs['prompt_embeds'] = prompt_embeds
        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = PNDMScheduler(skip_prk_steps=True)
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = 'french fries'
        output = ldmad_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217])
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDMaDPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            'prompt': 'a photograph of an astronaut riding a horse',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d')
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = depth[0, -3:, -1].flatten()
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)
        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706])
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706])
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDMaDPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            'prompt': 'a photograph of an astronaut riding a horse',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 50,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs

    def test_ldm3d(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d').to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c').to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __UpperCAmelCase ( _lowerCamelCase ):
__lowercase = 42
class __UpperCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase_=3 , lowerCAmelCase_=3 , lowerCAmelCase_=("DownEncoderBlock2D",) , lowerCAmelCase_=(64,) , lowerCAmelCase_=2 , lowerCAmelCase_=32 , lowerCAmelCase_="silu" , lowerCAmelCase_=True , ):
"""simple docstring"""
super().__init__()
_snake_case = layers_per_block
_snake_case = torch.nn.Convad(
lowerCAmelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_snake_case = None
_snake_case = nn.ModuleList([] )
# down
_snake_case = block_out_channels[0]
for i, down_block_type in enumerate(lowerCAmelCase_ ):
_snake_case = output_channel
_snake_case = block_out_channels[i]
_snake_case = i == len(lowerCAmelCase_ ) - 1
_snake_case = get_down_block(
lowerCAmelCase_ , num_layers=self.layers_per_block , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=lowerCAmelCase_ , resnet_groups=lowerCAmelCase_ , attention_head_dim=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , )
self.down_blocks.append(lowerCAmelCase_ )
# mid
_snake_case = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , )
# out
_snake_case = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCAmelCase_ , eps=1E-6 )
_snake_case = nn.SiLU()
_snake_case = 2 * out_channels if double_z else out_channels
_snake_case = nn.Convad(block_out_channels[-1] , lowerCAmelCase_ , 3 , padding=1 )
_snake_case = False
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = x
_snake_case = self.conv_in(lowerCAmelCase_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCAmelCase_ ):
def custom_forward(*lowerCAmelCase_ ):
return module(*lowerCAmelCase_ )
return custom_forward
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
_snake_case = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
# middle
_snake_case = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
else:
for down_block in self.down_blocks:
_snake_case = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ )
# middle
_snake_case = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCAmelCase_ )
else:
# down
for down_block in self.down_blocks:
_snake_case = down_block(lowerCAmelCase_ )
# middle
_snake_case = self.mid_block(lowerCAmelCase_ )
# post-process
_snake_case = self.conv_norm_out(lowerCAmelCase_ )
_snake_case = self.conv_act(lowerCAmelCase_ )
_snake_case = self.conv_out(lowerCAmelCase_ )
return sample
class __UpperCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase_=3 , lowerCAmelCase_=3 , lowerCAmelCase_=("UpDecoderBlock2D",) , lowerCAmelCase_=(64,) , lowerCAmelCase_=2 , lowerCAmelCase_=32 , lowerCAmelCase_="silu" , lowerCAmelCase_="group" , ):
"""simple docstring"""
super().__init__()
_snake_case = layers_per_block
_snake_case = nn.Convad(
lowerCAmelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_snake_case = None
_snake_case = nn.ModuleList([] )
_snake_case = in_channels if norm_type == 'spatial' else None
# mid
_snake_case = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , )
# up
_snake_case = list(reversed(lowerCAmelCase_ ) )
_snake_case = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCAmelCase_ ):
_snake_case = output_channel
_snake_case = reversed_block_out_channels[i]
_snake_case = i == len(lowerCAmelCase_ ) - 1
_snake_case = get_up_block(
lowerCAmelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , prev_output_channel=lowerCAmelCase_ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase_ , resnet_groups=lowerCAmelCase_ , attention_head_dim=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , resnet_time_scale_shift=lowerCAmelCase_ , )
self.up_blocks.append(lowerCAmelCase_ )
_snake_case = output_channel
# out
if norm_type == "spatial":
_snake_case = SpatialNorm(block_out_channels[0] , lowerCAmelCase_ )
else:
_snake_case = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCAmelCase_ , eps=1E-6 )
_snake_case = nn.SiLU()
_snake_case = nn.Convad(block_out_channels[0] , lowerCAmelCase_ , 3 , padding=1 )
_snake_case = False
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ):
"""simple docstring"""
_snake_case = z
_snake_case = self.conv_in(lowerCAmelCase_ )
_snake_case = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCAmelCase_ ):
def custom_forward(*lowerCAmelCase_ ):
return module(*lowerCAmelCase_ )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
_snake_case = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase_ , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
_snake_case = sample.to(lowerCAmelCase_ )
# up
for up_block in self.up_blocks:
_snake_case = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
else:
# middle
_snake_case = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = sample.to(lowerCAmelCase_ )
# up
for up_block in self.up_blocks:
_snake_case = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , lowerCAmelCase_ )
else:
# middle
_snake_case = self.mid_block(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = sample.to(lowerCAmelCase_ )
# up
for up_block in self.up_blocks:
_snake_case = up_block(lowerCAmelCase_ , lowerCAmelCase_ )
# post-process
if latent_embeds is None:
_snake_case = self.conv_norm_out(lowerCAmelCase_ )
else:
_snake_case = self.conv_norm_out(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.conv_act(lowerCAmelCase_ )
_snake_case = self.conv_out(lowerCAmelCase_ )
return sample
class __UpperCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_="random" , lowerCAmelCase_=False , lowerCAmelCase_=True ):
"""simple docstring"""
super().__init__()
_snake_case = n_e
_snake_case = vq_embed_dim
_snake_case = beta
_snake_case = legacy
_snake_case = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_snake_case = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
_snake_case = self.used.shape[0]
_snake_case = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_snake_case = self.re_embed
_snake_case = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
_snake_case = n_e
_snake_case = sane_index_shape
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = inds.shape
assert len(lowerCAmelCase_ ) > 1
_snake_case = inds.reshape(ishape[0] , -1 )
_snake_case = self.used.to(lowerCAmelCase_ )
_snake_case = (inds[:, :, None] == used[None, None, ...]).long()
_snake_case = match.argmax(-1 )
_snake_case = match.sum(2 ) < 1
if self.unknown_index == "random":
_snake_case = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_snake_case = self.unknown_index
return new.reshape(lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = inds.shape
assert len(lowerCAmelCase_ ) > 1
_snake_case = inds.reshape(ishape[0] , -1 )
_snake_case = self.used.to(lowerCAmelCase_ )
if self.re_embed > self.used.shape[0]: # extra token
_snake_case = 0 # simply set to zero
_snake_case = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCAmelCase_ )
return back.reshape(lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = z.permute(0 , 2 , 3 , 1 ).contiguous()
_snake_case = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_snake_case = torch.argmin(torch.cdist(lowerCAmelCase_ , self.embedding.weight ) , dim=1 )
_snake_case = self.embedding(lowerCAmelCase_ ).view(z.shape )
_snake_case = None
_snake_case = None
# compute loss for embedding
if not self.legacy:
_snake_case = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_snake_case = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_snake_case = z + (z_q - z).detach()
# reshape back to match original input shape
_snake_case = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_snake_case = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_snake_case = self.remap_to_used(lowerCAmelCase_ )
_snake_case = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_snake_case = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if self.remap is not None:
_snake_case = indices.reshape(shape[0] , -1 ) # add batch axis
_snake_case = self.unmap_to_all(lowerCAmelCase_ )
_snake_case = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_snake_case = self.embedding(lowerCAmelCase_ )
if shape is not None:
_snake_case = z_q.view(lowerCAmelCase_ )
# reshape back to match original input shape
_snake_case = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __UpperCAmelCase ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=False ):
"""simple docstring"""
_snake_case = parameters
_snake_case , _snake_case = torch.chunk(lowerCAmelCase_ , 2 , dim=1 )
_snake_case = torch.clamp(self.logvar , -30.0 , 20.0 )
_snake_case = deterministic
_snake_case = torch.exp(0.5 * self.logvar )
_snake_case = torch.exp(self.logvar )
if self.deterministic:
_snake_case = _snake_case = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowerCamelCase ( self , lowerCAmelCase_ = None ):
"""simple docstring"""
_snake_case = randn_tensor(
self.mean.shape , generator=lowerCAmelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
_snake_case = self.mean + self.std * sample
return x
def lowerCamelCase ( self , lowerCAmelCase_=None ):
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=[1, 2, 3] ):
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
_snake_case = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
return self.mean
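# Illustrative sketch (added, not part of the original file): the final class
# above samples with the reparameterization trick, latent = mean + std * eps
# with eps ~ N(0, I), so gradients flow through mean and std. A standalone
# numeric version of that single step, with purely illustrative shapes:
if __name__ == "__main__":
    _mean = torch.zeros(1, 4, 8, 8)
    _logvar = torch.zeros(1, 4, 8, 8)
    _eps = torch.randn_like(_mean)
    _latent = _mean + torch.exp(0.5 * _logvar) * _eps  # std = exp(0.5 * logvar) = 1 here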
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused")
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
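# Standalone usage sketch mirroring the slow test above (the model id comes from
# the test; variable names are illustrative):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier(audio_array, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])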
'''simple docstring'''
def is_even(number: int) -> bool:
    """Return True if the given integer is even.

    >>> is_even(4)
    True
    >>> is_even(7)
    False
    """
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
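    # A couple of direct checks in addition to the doctests (added illustration):
    assert is_even(4)
    assert not is_even(7)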
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}


class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
def __init__( self : List[Any] , lowercase_ : Dict , lowercase_ : str , lowercase_ : Tuple="<s>" , lowercase_ : List[Any]="</s>" , lowercase_ : Optional[int]="<pad>" , lowercase_ : Dict="<unk>" , lowercase_ : List[Any]=False , lowercase_ : Tuple=False , lowercase_ : Union[str, Any]=None , lowercase_ : Any=None , lowercase_ : int = None , **lowercase_ : Optional[int] , ) -> Optional[Any]:
lowercase__ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , do_upper_case=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , tgt_lang=lowerCAmelCase_ , lang_codes=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , )
lowercase__ : Optional[int] = do_upper_case
lowercase__ : Any = do_lower_case
lowercase__ : Dict = load_json(lowerCAmelCase_ )
lowercase__ : Optional[int] = {v: k for k, v in self.encoder.items()}
lowercase__ : str = spm_file
lowercase__ : Any = load_spm(lowerCAmelCase_ , self.sp_model_kwargs )
if lang_codes is not None:
lowercase__ : str = lang_codes
lowercase__ : Tuple = LANGUAGES[lang_codes]
lowercase__ : Dict = [F'''<lang:{lang}>''' for lang in self.langs]
lowercase__ : Tuple = {lang: self.sp_model.PieceToId(F'''<lang:{lang}>''' ) for lang in self.langs}
lowercase__ : Any = self.lang_tokens
lowercase__ : Tuple = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
lowercase__ : List[str] = {}
@property
def __UpperCamelCase ( self : str ) -> Optional[int]:
return len(self.encoder )
@property
def __UpperCamelCase ( self : str ) -> int:
return self._tgt_lang
@tgt_lang.setter
def __UpperCamelCase ( self : Any , lowercase_ : Optional[int] ) -> Optional[int]:
lowercase__ : Union[str, Any] = new_tgt_lang
self.set_tgt_lang_special_tokens(lowerCAmelCase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str ) -> Dict:
lowercase__ : Optional[Any] = self.lang_code_to_id[tgt_lang]
lowercase__ : Optional[Any] = [lang_code_id]
def __UpperCamelCase ( self : List[Any] , lowercase_ : str ) -> str:
return self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_ )
def __UpperCamelCase ( self : str , lowercase_ : Union[str, Any] ) -> List[str]:
return self.encoder.get(lowerCAmelCase_ , self.encoder[self.unk_token] )
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Any ) -> Union[str, Any]:
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
def __UpperCamelCase ( self : int , lowercase_ : Tuple ) -> int:
lowercase__ : Any = []
lowercase__ : Optional[Any] = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
lowercase__ : Tuple = self.sp_model.decode(lowerCAmelCase_ )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
lowercase__ : Dict = []
else:
current_sub_tokens.append(lowerCAmelCase_ )
lowercase__ : Dict = self.sp_model.decode(lowerCAmelCase_ )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def __UpperCamelCase ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any]=None ) -> Optional[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : int = None , lowercase_ : List[Any] = False ) -> List[str]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
lowercase__ : List[str] = [1] * len(self.prefix_tokens )
lowercase__ : Optional[Any] = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase_ )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase_ )) + ([0] * len(lowerCAmelCase_ )) + suffix_ones
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
lowercase__ : List[str] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ) -> Union[str, Any]:
lowercase__ : Optional[Any] = self.__dict__.copy()
lowercase__ : List[Any] = None
return state
def __setstate__( self : int , lowercase_ : List[str] ) -> Optional[int]:
lowercase__ : Any = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowercase__ : Any = {}
lowercase__ : Union[str, Any] = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
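# Usage sketch (checkpoint id taken from the vocab map above; from_pretrained
# downloads the vocabulary and SentencePiece model from the Hugging Face Hub):
#   tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#   input_ids = tokenizer("hello world").input_ids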
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
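# Note: bfloat16 autocast on CPU only brings real speedups on processors with
# AVX-512 BF16 or AMX support; on other hardware IPEX still runs, but with
# limited benefit. This is a general IPEX property, not stated by this script.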
def longest_distance(graph: dict[int, list[int]]) -> None:
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
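# For the graph above the longest chain is 0 -> 3 -> 5 -> 6 -> 7, so this call
# prints 5 (distances are counted in vertices, since long_dist starts at 1).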
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for the given input sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel('Gain (dB)')

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel('Phase shift (Radians)')
    plt.plot(np.unwrap(fft_phase, -2 * pi))
    plt.show()
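# Minimal usage sketch (added; assumes only the FilterType protocol above): an
# identity filter has a flat 0 dB magnitude response and zero phase shift.
if __name__ == "__main__":
    class IdentityFilter:
        def process(self, sample: float) -> float:
            return sample

    show_frequency_response(IdentityFilter(), samplerate=48000)
    show_phase_response(IdentityFilter(), samplerate=48000)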
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing: an iterable dataset whose length varies randomly from run to run.
    def __init__(self, p_stop=0.01, max_length=1_000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class _A ( unittest.TestCase ):
def __a ( self : Any , _A : Union[str, Any] , _A : List[str] , _A : int=False , _A : List[Any]=True ) -> int:
"""simple docstring"""
lowercase : Dict = [
BatchSamplerShard(lowerCAmelCase_ , 2 , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
for i in range(2 )
]
lowercase : Optional[int] = [list(lowerCAmelCase_ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(lowerCAmelCase_ ) for shard in batch_sampler_shards] , [len(lowerCAmelCase_ ) for e in expected] )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __a ( self : int ) -> int:
"""simple docstring"""
lowercase : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
lowercase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
lowercase : int = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowercase : str = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
lowercase : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
lowercase : List[str] = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
lowercase : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
lowercase : int = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
lowercase : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
lowercase : int = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
lowercase : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
lowercase : Union[str, Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
lowercase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
lowercase : str = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
lowercase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
# Check the shards when the dataset is very small.
lowercase : Optional[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
lowercase : List[Any] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
lowercase : List[str] = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
lowercase : Any = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
def __a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase : Optional[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
lowercase : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
lowercase : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size.
lowercase : Dict = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
lowercase : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
lowercase : int = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
lowercase : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowercase : List[str] = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
lowercase : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
lowercase : int = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
lowercase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
# Check the shards when the dataset is very small.
lowercase : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
lowercase : Optional[Any] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
lowercase : str = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
lowercase : str = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
def __a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Dict = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
lowercase : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
lowercase : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowercase : Union[str, Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
lowercase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
lowercase : Dict = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
lowercase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
lowercase : Optional[int] = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
lowercase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
lowercase : int = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
lowercase : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
lowercase : Optional[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
lowercase : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
lowercase : Union[str, Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
lowercase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is very small.
lowercase : Dict = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
lowercase : int = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
lowercase : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
lowercase : Optional[Any] = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
def __a ( self : Dict ) -> str:
"""simple docstring"""
lowercase : List[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
lowercase : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
lowercase : int = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size.
lowercase : List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
lowercase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
lowercase : Optional[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
lowercase : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowercase : str = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
lowercase : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
lowercase : Optional[Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
lowercase : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is very small.
lowercase : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
lowercase : Any = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
lowercase : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
lowercase : Optional[Any] = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
def __a ( self : List[str] ) -> Dict:
"""simple docstring"""
lowercase : Optional[Any] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
lowercase : str = [BatchSamplerShard(lowerCAmelCase_ , 2 , lowerCAmelCase_ , even_batches=lowerCAmelCase_ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def __a ( self : Union[str, Any] , _A : List[Any] , _A : Tuple , _A : Tuple , _A : Tuple=False , _A : Tuple=2 , _A : List[str]=False ) -> Optional[Any]:
"""simple docstring"""
random.seed(lowerCAmelCase_ )
lowercase : Optional[Any] = list(lowerCAmelCase_ )
lowercase : Optional[Any] = [
IterableDatasetShard(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , drop_last=lowerCAmelCase_ , num_processes=lowerCAmelCase_ , process_index=lowerCAmelCase_ , split_batches=lowerCAmelCase_ , )
for i in range(lowerCAmelCase_ )
]
lowercase : Tuple = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(lowerCAmelCase_ )
iterable_dataset_lists.append(list(lowerCAmelCase_ ) )
lowercase : Tuple = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
lowercase : List[str] = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
self.assertTrue(len(lowerCAmelCase_ ) % shard_batch_size == 0 )
lowercase : Optional[Any] = []
for idx in range(0 , len(lowerCAmelCase_ ) , lowerCAmelCase_ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(lowerCAmelCase_ ) < len(lowerCAmelCase_ ):
reference += reference
self.assertListEqual(lowerCAmelCase_ , reference[: len(lowerCAmelCase_ )] )
def __a ( self : Tuple ) -> List[Any]:
"""simple docstring"""
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])
def __a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
def __a ( self : Tuple ) -> List[Any]:
"""simple docstring"""
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
def __a ( self : Optional[Any] ) -> int:
"""simple docstring"""
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
def __a ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
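# A minimal sketch (not the accelerate implementation) of the idea the
# SkipBatchSampler/SkipDataLoader tests above exercise: wrap a batch sampler
# and skip its first `skip_batches` batches.
from torch.utils.data import BatchSampler

class NaiveSkipBatchSampler:
    def __init__(self, batch_sampler, skip_batches=0):
        self.batch_sampler = batch_sampler
        self.skip_batches = skip_batches

    def __iter__(self):
        # Yield only the batches that come after the first `skip_batches` ones.
        for index, batch in enumerate(self.batch_sampler):
            if index >= self.skip_batches:
                yield batch

sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
assert list(NaiveSkipBatchSampler(sampler, skip_batches=2)) == [[8, 9, 10, 11], [12, 13, 14, 15]]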
| 308
|
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class __UpperCAmelCase ( tf.keras.layers.Layer ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1 , lowerCAmelCase_=False , **lowerCAmelCase_ ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_snake_case = vocab_size
_snake_case = d_embed
_snake_case = d_proj
_snake_case = cutoffs + [vocab_size]
_snake_case = [0] + self.cutoffs
_snake_case = div_val
_snake_case = self.cutoffs[0]
_snake_case = len(self.cutoffs ) - 1
_snake_case = self.shortlist_size + self.n_clusters
_snake_case = keep_order
_snake_case = []
_snake_case = []
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
if self.n_clusters > 0:
_snake_case = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='zeros' , trainable=lowerCAmelCase_ , name='cluster_weight' )
_snake_case = self.add_weight(
shape=(self.n_clusters,) , initializer='zeros' , trainable=lowerCAmelCase_ , name='cluster_bias' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
_snake_case = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_projs_._{i}' , )
self.out_projs.append(lowerCAmelCase_ )
else:
self.out_projs.append(lowerCAmelCase_ )
_snake_case = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_layers_._{i}_._weight' , )
_snake_case = self.add_weight(
shape=(self.vocab_size,) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_layers_._{i}_._bias' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
_snake_case , _snake_case = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_snake_case = self.d_embed // (self.div_val**i)
_snake_case = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_projs_._{i}' )
self.out_projs.append(lowerCAmelCase_ )
_snake_case = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_layers_._{i}_._weight' , )
_snake_case = self.add_weight(
shape=(r_idx - l_idx,) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_layers_._{i}_._bias' , )
self.out_layers.append((weight, bias) )
super().build(lowerCAmelCase_ )
@staticmethod
def lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ):
"""simple docstring"""
_snake_case = x
if proj is not None:
_snake_case = tf.einsum('ibd,ed->ibe' , lowerCAmelCase_ , lowerCAmelCase_ )
return tf.einsum('ibd,nd->ibn' , lowerCAmelCase_ , lowerCAmelCase_ ) + b
@staticmethod
def lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = shape_list(lowerCAmelCase_ )
_snake_case = tf.range(lp_size[0] , dtype=target.dtype )
_snake_case = tf.stack([r, target] , 1 )
return tf.gather_nd(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=False ):
"""simple docstring"""
_snake_case = 0
if self.n_clusters == 0:
_snake_case = self._logit(lowerCAmelCase_ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
_snake_case = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=lowerCAmelCase_ , logits=lowerCAmelCase_ )
_snake_case = tf.nn.log_softmax(lowerCAmelCase_ , axis=-1 )
else:
_snake_case = shape_list(lowerCAmelCase_ )
_snake_case = []
_snake_case = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
_snake_case , _snake_case = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
_snake_case = (target >= l_idx) & (target < r_idx)
_snake_case = tf.where(lowerCAmelCase_ )
_snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ ) - l_idx
if self.div_val == 1:
_snake_case = self.out_layers[0][0][l_idx:r_idx]
_snake_case = self.out_layers[0][1][l_idx:r_idx]
else:
_snake_case = self.out_layers[i][0]
_snake_case = self.out_layers[i][1]
if i == 0:
_snake_case = tf.concat([cur_W, self.cluster_weight] , 0 )
_snake_case = tf.concat([cur_b, self.cluster_bias] , 0 )
_snake_case = self._logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.out_projs[0] )
_snake_case = tf.nn.log_softmax(lowerCAmelCase_ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
_snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self._gather_logprob(lowerCAmelCase_ , lowerCAmelCase_ )
else:
_snake_case = self._logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.out_projs[i] )
_snake_case = tf.nn.log_softmax(lowerCAmelCase_ )
_snake_case = self.cutoffs[0] + i - 1 # No probability for the head cluster
_snake_case = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(lowerCAmelCase_ )
if target is not None:
_snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self._gather_logprob(lowerCAmelCase_ , lowerCAmelCase_ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(lowerCAmelCase_ , -cur_logprob , shape_list(lowerCAmelCase_ ) )
_snake_case = tf.concat(lowerCAmelCase_ , axis=-1 )
if target is not None:
if return_mean:
_snake_case = tf.reduce_mean(lowerCAmelCase_ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(lowerCAmelCase_ )
# Log the loss as a metric (we could log arbitrary metrics,
        # including different metrics for training and inference).
self.add_metric(lowerCAmelCase_ , name=self.name , aggregation='mean' if return_mean else '' )
return out
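# A minimal standalone sketch of the row-wise gather used in the static
# `_gather_logprob` helper above: for each row, pick the log-probability of
# that row's target id with tf.gather_nd. The values here are illustrative.
import tensorflow as tf

logprob = tf.math.log(tf.constant([[0.1, 0.7, 0.2], [0.5, 0.25, 0.25]]))
target = tf.constant([1, 0])  # target id per row
rows = tf.range(tf.shape(logprob)[0], dtype=target.dtype)
picked = tf.gather_nd(logprob, tf.stack([rows, target], axis=1))
print(picked.numpy())  # [log(0.7), log(0.5)]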
| 42
| 0
|
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
lowerCAmelCase : Union[str, Any] = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
lowerCAmelCase : str = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
lowerCAmelCase : Any = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/google-research/tree/master/rouge"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/ROUGE_(metric)""",
"""https://github.com/google-research/google-research/tree/master/rouge""",
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        """simple docstring"""
        if rouge_types is None:
            rouge_types = ["""rouge1""", """rouge2""", """rougeL""", """rougeLsum"""]
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
return result
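# A minimal sketch of what the _compute method above does for a single
# prediction/reference pair, assuming the rouge_score package is installed.
from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(rouge_types=["rouge1", "rougeL"], use_stemmer=True)
score = scorer.score("hello there", "hello there general kenobi")
print(score["rouge1"].fmeasure)  # Score namedtuple: precision, recall, fmeasure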
| 291
|
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowercase : Dict = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowercase : Optional[int] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowercase : Optional[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_a: str, parent_b: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_a) - 1)
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(parent_a: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_b = population_score[random.randint(0, N_SELECTED)][0]
        child_a, child_b = crossover(parent_a[0], parent_b)
        # Append new strings to the population list.
        pop.append(mutate(child_a, genes))
        pop.append(mutate(child_b, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        raise ValueError(f'{N_POPULATION} must be bigger than {N_SELECTED}')
    # Verify that the target contains no genes besides the ones inside the genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        raise ValueError(f'{not_in_genes_list} is not in genes list, evolution cannot converge')
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append(''.join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'\nGeneration: {generation}'
                f'\nTotal Population:{total_population}'
                f'\nBest score: {population_score[0][1]}'
                f'\nBest string: {population_score[0][0]}')
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population) > N_POPULATION:
                break
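# A quick, self-contained sanity check (hypothetical strings) for the helpers
# above: two of the three characters of "cat" match "car", and crossover always
# preserves string length.
def _demo_helpers() -> None:
    assert evaluate("cat", "car") == ("cat", 2.0)
    child_a, child_b = crossover("aaaa", "bbbb")
    assert len(child_a) == len(child_b) == 4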
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
    )
| 42
| 0
|
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def A_ ( self ):
'''simple docstring'''
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = GPTaTokenizerFast.from_pretrained("gpt2")
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPTaTokenizerFast.from_pretrained("gpt2")
# This check we did call the fake head request
mock_head.assert_called()
def A_ ( self ):
'''simple docstring'''
try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)
            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json" , "wb" ) as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_0_0_0 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A_ ( cls ):
'''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
def A_ ( cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def A_ ( self ):
'''simple docstring'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)
        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
def A_ ( self ):
'''simple docstring'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)
        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
@require_tokenizers
def A_ ( self ):
'''simple docstring'''
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)
        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
        trie = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def A_ ( self ):
'''simple docstring'''
        trie = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS]", " This is a ", "extra_id_100"] )
def A_ ( self ):
'''simple docstring'''
        trie = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) , ["A", "BC"] )
self.assertEqual(trie.split("BCA" ) , ["BC", "A"] )
def A_ ( self ):
'''simple docstring'''
        trie = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def A_ ( self ):
'''simple docstring'''
        trie = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def A_ ( self ):
'''simple docstring'''
        trie = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) , ["AB", "C"] )
def A_ ( self ):
'''simple docstring'''
        trie = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) , ["ABC", "D"] )
def A_ ( self ):
'''simple docstring'''
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
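# A minimal sketch (not the transformers implementation) of the nested-dict
# trie the tests above describe: each character maps to a sub-dict and the
# empty string marks the end of a complete token.
def trie_add(data: dict, word: str) -> None:
    node = data
    for char in word:
        node = node.setdefault(char, {})
    node[""] = 1

data: dict = {}
trie_add(data, "Hello")
assert data == {"H": {"e": {"l": {"l": {"o": {"": 1}}}}}}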
| 311
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_chinese_clip": [
"CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ChineseCLIPConfig",
"ChineseCLIPOnnxConfig",
"ChineseCLIPTextConfig",
"ChineseCLIPVisionConfig",
],
"processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[Any] = ["ChineseCLIPFeatureExtractor"]
lowercase : List[Any] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ChineseCLIPModel",
"ChineseCLIPPreTrainedModel",
"ChineseCLIPTextModel",
"ChineseCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
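# A minimal sketch (not the transformers implementation) of the lazy-import
# pattern used above: names listed in an import structure are only imported
# from their submodule on first attribute access.
import importlib
import types

class NaiveLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)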
| 42
| 0
|
"""simple docstring"""
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    '''simple docstring'''
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
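# A worked example (hypothetical item lists) for the function above: values
# 60/100/120 with weights 10/20/30 at capacity 50 give the classic optimum of
# 240 by taking the first two items whole plus two thirds of the third.
def _demo_knapsack() -> None:
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    assert max_value == 240.0
    assert fractions[:2] == [1, 1] and abs(fractions[2] - 2 / 3) < 1e-9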
if __name__ == "__main__":
import doctest
doctest.testmod()
| 108
|
'''simple docstring'''
def count_divisors(n: int) -> int:
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
def solution() -> int:
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
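# A quick sanity check for the divisor-counting trick above: 28 = 2**2 * 7, so
# it has (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28).
def _demo_count_divisors() -> None:
    assert count_divisors(28) == 6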
if __name__ == "__main__":
print(solution())
| 42
| 0
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
a = ["gpt2"]
a = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPTaLMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name='text'),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized['input_ids'].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)['logits']
            return outputs
@require_tf
@require_keras_nlp
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Optional[Any] ):
super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
def lowerCAmelCase_ ( self : Optional[int] ):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors='tf')
                tf_outputs = tf_tokenizer([test_inputs])
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(lowerCAmelCase_ , tf.intaa ) == tf_outputs_values ) )
@slow
def lowerCAmelCase_ ( self : Tuple ):
for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def lowerCAmelCase_ ( self : Any ):
for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / 'saved.model'
                tf.saved_model.save(model, save_path, signatures={'serving_default': model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures['serving_default'](test_inputs)['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def lowerCAmelCase_ ( self : List[Any] ):
for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
            tf_tokenizer.pad_token_id = 123_123
            for max_length in [3, 5, 1_024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out['input_ids'].numpy().shape[1]
assert out_length == max_length
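# A condensed sketch of the pattern exercised above, assuming the real
# (non-obfuscated) transformers names TFGPT2Tokenizer and models.gpt2 are
# available: wrap the in-graph tokenizer in tf.function and call it on a
# constant tensor of strings.
import tensorflow as tf
from transformers.models.gpt2 import TFGPT2Tokenizer

tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
compiled_tokenizer = tf.function(tf_tokenizer)
outputs = compiled_tokenizer(tf.constant(["This is a straightforward English test sentence."]))
print(outputs["input_ids"])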
| 315
|
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __UpperCAmelCase ( _lowerCamelCase ):
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_snake_case = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_snake_case = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_snake_case = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(lowerCAmelCase_ )
BertModel.from_pretrained(lowerCAmelCase_ )
BertTokenizer.from_pretrained(lowerCAmelCase_ )
pipeline(task='fill-mask' , model=lowerCAmelCase_ )
# baseline - just load from_pretrained with normal network
_snake_case = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_snake_case = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_snake_case = '1'
_snake_case = subprocess.run(lowerCAmelCase_ , env=lowerCAmelCase_ , check=lowerCAmelCase_ , capture_output=lowerCAmelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_snake_case = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_snake_case = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_snake_case = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(lowerCAmelCase_ )
BertModel.from_pretrained(lowerCAmelCase_ )
BertTokenizer.from_pretrained(lowerCAmelCase_ )
pipeline(task='fill-mask' , model=lowerCAmelCase_ )
# baseline - just load from_pretrained with normal network
_snake_case = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_snake_case = self.get_env()
_snake_case = subprocess.run(lowerCAmelCase_ , env=lowerCAmelCase_ , check=lowerCAmelCase_ , capture_output=lowerCAmelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
_snake_case = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
_snake_case = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
_snake_case = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_snake_case = self.get_env()
_snake_case = subprocess.run(lowerCAmelCase_ , env=lowerCAmelCase_ , check=lowerCAmelCase_ , capture_output=lowerCAmelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# next emulate no network
_snake_case = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_snake_case = '1'
_snake_case = subprocess.run(lowerCAmelCase_ , env=lowerCAmelCase_ , check=lowerCAmelCase_ , capture_output=lowerCAmelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = '\nfrom transformers import pipeline\n '
_snake_case = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
_snake_case = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
_snake_case = self.get_env()
_snake_case = '1'
_snake_case = [sys.executable, '-c', '\n'.join([load, mock, run] )]
_snake_case = subprocess.run(lowerCAmelCase_ , env=lowerCAmelCase_ , check=lowerCAmelCase_ , capture_output=lowerCAmelCase_ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = '\nfrom transformers import AutoModel\n '
_snake_case = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
_snake_case = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_snake_case = self.get_env()
_snake_case = subprocess.run(lowerCAmelCase_ , env=lowerCAmelCase_ , check=lowerCAmelCase_ , capture_output=lowerCAmelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_snake_case = '1'
_snake_case = subprocess.run(lowerCAmelCase_ , env=lowerCAmelCase_ , check=lowerCAmelCase_ , capture_output=lowerCAmelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
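# A standalone sketch of the trick these offline-mode tests inject via the
# `mock` strings above: replacing socket.socket so any attempt to open a
# connection raises, simulating a machine with no network access.
import socket

def offline_socket(*args, **kwargs):
    raise RuntimeError("Offline mode is enabled, we shouldn't access internet")

socket.socket = offline_socket  # every subsequent connection attempt now fails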
| 42
| 0
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
__snake_case = datasets.logging.get_logger(__name__)
__snake_case = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
__snake_case = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
__snake_case = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
"""simple docstring"""
__UpperCamelCase = {doc: key_lines}
__UpperCamelCase = {doc: sys_lines}
__UpperCamelCase = {}
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase, __UpperCamelCase = reader.get_doc_mentions(__A , key_doc_lines[doc] , __A )
key_singletons_num += singletons_num
if NP_only or min_span:
__UpperCamelCase = reader.set_annotated_parse_trees(__A , key_doc_lines[doc] , __A , __A )
__UpperCamelCase, __UpperCamelCase = reader.get_doc_mentions(__A , sys_doc_lines[doc] , __A )
sys_singletons_num += singletons_num
if NP_only or min_span:
__UpperCamelCase = reader.set_annotated_parse_trees(__A , key_doc_lines[doc] , __A , __A )
if remove_nested:
__UpperCamelCase, __UpperCamelCase = reader.remove_nested_coref_mentions(__A , __A )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
__UpperCamelCase, __UpperCamelCase = reader.remove_nested_coref_mentions(__A , __A )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
__UpperCamelCase = reader.get_mention_assignments(__A , __A )
__UpperCamelCase = reader.get_mention_assignments(__A , __A )
__UpperCamelCase = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'Number of removed nested coreferring mentions in the key '
f'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' )
logger.info(
'Number of resulting singleton clusters in the key '
f'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' )
if not keep_singletons:
logger.info(
f'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
'files, respectively' )
return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
"""simple docstring"""
__UpperCamelCase = get_coref_infos(__A , __A , __A , __A , __A , __A )
__UpperCamelCase = {}
__UpperCamelCase = 0
__UpperCamelCase = 0
for name, metric in metrics:
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = evaluator.evaluate_documents(__A , __A , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f'''{name}/recall''': recall, f'''{name}/precision''': precision, f'''{name}/f1''': fa} )
logger.info(
name.ljust(10 ) , f'''Recall: {recall * 1_00:.2f}''' , f''' Precision: {precision * 1_00:.2f}''' , f''' F1: {fa * 1_00:.2f}''' , )
if conll_subparts_num == 3:
__UpperCamelCase = (conll / 3) * 1_00
logger.info(f'''CoNLL score: {conll:.2f}''' )
output_scores.update({'conll_score': conll} )
return output_scores
def check_gold_parse_annotation(key_lines):
"""simple docstring"""
__UpperCamelCase = False
for line in key_lines:
if not line.startswith('#' ):
if len(line.split() ) > 6:
__UpperCamelCase = line.split()[5]
if not parse_col == "-":
__UpperCamelCase = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase (datasets.Metric ):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Sequence(datasets.Value('string' ) ),
} ),codebase_urls=['https://github.com/ns-moosavi/coval'],reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
],)
    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        '''simple docstring'''
        metrics = [
            ('mentions', evaluator.mentions),
            ('muc', evaluator.muc),
            ('bcub', evaluator.b_cubed),
            ('ceafe', evaluator.ceafe),
            ('lea', evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.')
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references, sys_lines=predictions, metrics=metrics, NP_only=NP_only, remove_nested=remove_nested, keep_singletons=keep_singletons, min_span=min_span)
return score
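# The CoNLL score computed above is simply the mean of the MUC, B-cubed and
# CEAFe F1 values scaled to 0-100; a one-line restatement of that arithmetic:
def conll_score(muc_f1, bcub_f1, ceafe_f1):
    return (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100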
| 310
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
        model = AutoModelForSeq2SeqLM.from_pretrained('google/mt5-small', return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')
        input_ids = tokenizer('Hello there', return_tensors='pt').input_ids
        labels = tokenizer('Hi I am', return_tensors='pt').input_ids
        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1E-4)
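# The score computed above turns the mean per-token cross-entropy returned by
# the model into a summed sequence log-likelihood; the same arithmetic stated
# as a helper (a sketch, not part of the original test):
def sequence_log_likelihood(mean_loss: float, num_tokens: int) -> float:
    return -(num_tokens * mean_loss)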
| 42
| 0
|
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
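# A quick usage example for ohms_law above: 2 A through 4 ohm implies an 8 V
# source, so passing voltage=0 solves for it.
def _demo_ohms_law() -> None:
    assert ohms_law(voltage=0, current=2, resistance=4) == {"voltage": 8.0}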
if __name__ == "__main__":
import doctest
doctest.testmod()
| 182
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[int] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Tuple = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
lowercase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
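# The `_LazyModule` above defers the heavy imports until an attribute is
# first touched. PEP 562's module-level __getattr__ gives the same effect in
# miniature (an illustrative sketch, not the actual implementation):
#
#   import importlib
#
#   def __getattr__(name):
#       module = importlib.import_module(".processing_pix2struct", __package__)
#       return getattr(module, name)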
| 42
| 0
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : Union[str, Any] = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
for attribute in key.split('.' ):
_UpperCamelCase : Any = getattr(__A , __A )
if weight_type is not None:
_UpperCamelCase : Union[str, Any] = getattr(__A , __A ).shape
else:
_UpperCamelCase : Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
_UpperCamelCase : str = value
elif weight_type == "weight_g":
_UpperCamelCase : Optional[Any] = value
elif weight_type == "weight_v":
_UpperCamelCase : Optional[int] = value
elif weight_type == "bias":
_UpperCamelCase : List[str] = value
else:
_UpperCamelCase : int = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : Tuple = []
_UpperCamelCase : List[str] = fairseq_model.state_dict()
_UpperCamelCase : Dict = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_UpperCamelCase : str = False
if "conv_layers" in name:
load_conv_layer(
__A , __A , __A , __A , hf_model.config.feat_extract_norm == 'group' , )
_UpperCamelCase : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
_UpperCamelCase : Optional[Any] = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
_UpperCamelCase : List[str] = True
if "*" in mapped_key:
_UpperCamelCase : int = name.split(__A )[0].split('.' )[-2]
_UpperCamelCase : Any = mapped_key.replace('*' , __A )
if "weight_g" in name:
_UpperCamelCase : Any = 'weight_g'
elif "weight_v" in name:
_UpperCamelCase : str = 'weight_v'
elif "weight" in name:
_UpperCamelCase : Dict = 'weight'
elif "bias" in name:
_UpperCamelCase : int = 'bias'
else:
_UpperCamelCase : List[str] = None
set_recursively(__A , __A , __A , __A , __A )
continue
if not is_used:
unused_weights.append(__A )
logger.warning(f'Unused weights: {unused_weights}' )
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : Union[str, Any] = full_name.split('conv_layers.' )[-1]
_UpperCamelCase : Tuple = name.split('.' )
_UpperCamelCase : Optional[Any] = int(items[0] )
_UpperCamelCase : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
_UpperCamelCase : Dict = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
_UpperCamelCase : str = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'
" found."
)
_UpperCamelCase : List[str] = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
)
_UpperCamelCase : str = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__A )
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : List[Any] = SEWConfig()
if is_finetuned:
_UpperCamelCase : str = model.wav_encoder.wav_model.cfg
else:
_UpperCamelCase : int = model.cfg
_UpperCamelCase : List[Any] = fs_config.conv_bias
_UpperCamelCase : Optional[Any] = eval(fs_config.conv_feature_layers )
_UpperCamelCase : Optional[Any] = [x[0] for x in conv_layers]
_UpperCamelCase : Any = [x[1] for x in conv_layers]
_UpperCamelCase : Tuple = [x[2] for x in conv_layers]
_UpperCamelCase : Optional[int] = 'gelu'
_UpperCamelCase : Union[str, Any] = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group'
_UpperCamelCase : Dict = 0.0
_UpperCamelCase : Any = fs_config.activation_fn.name
_UpperCamelCase : Any = fs_config.encoder_embed_dim
_UpperCamelCase : Optional[int] = 0.02
_UpperCamelCase : Optional[int] = fs_config.encoder_ffn_embed_dim
_UpperCamelCase : str = 1E-5
_UpperCamelCase : str = fs_config.encoder_layerdrop
_UpperCamelCase : Tuple = fs_config.encoder_attention_heads
_UpperCamelCase : List[Any] = fs_config.conv_pos_groups
_UpperCamelCase : int = fs_config.conv_pos
_UpperCamelCase : Optional[Any] = len(__A )
_UpperCamelCase : Dict = fs_config.encoder_layers
_UpperCamelCase : Any = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
_UpperCamelCase : int = model.cfg
_UpperCamelCase : Any = fs_config.final_dropout
_UpperCamelCase : Optional[Any] = fs_config.layerdrop
_UpperCamelCase : List[Any] = fs_config.activation_dropout
_UpperCamelCase : str = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
_UpperCamelCase : Any = fs_config.attention_dropout
_UpperCamelCase : Optional[Any] = fs_config.dropout_input
_UpperCamelCase : int = fs_config.dropout
_UpperCamelCase : Tuple = fs_config.mask_channel_length
_UpperCamelCase : List[str] = fs_config.mask_channel_prob
_UpperCamelCase : Union[str, Any] = fs_config.mask_length
_UpperCamelCase : int = fs_config.mask_prob
_UpperCamelCase : Dict = 'Wav2Vec2FeatureExtractor'
_UpperCamelCase : Tuple = 'Wav2Vec2CTCTokenizer'
return config
@torch.no_grad()
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=True ):
if is_finetuned:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
_UpperCamelCase : Dict = SEWConfig.from_pretrained(__A )
else:
_UpperCamelCase : str = convert_config(model[0] , __A )
_UpperCamelCase : Union[str, Any] = model[0].eval()
_UpperCamelCase : Any = True if config.feat_extract_norm == 'layer' else False
_UpperCamelCase : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=__A , return_attention_mask=__A , )
if is_finetuned:
if dict_path:
_UpperCamelCase : Optional[Any] = Dictionary.load(__A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_UpperCamelCase : List[Any] = target_dict.pad_index
_UpperCamelCase : Union[str, Any] = target_dict.bos_index
_UpperCamelCase : Optional[int] = target_dict.pad_index
_UpperCamelCase : str = target_dict.bos_index
_UpperCamelCase : Dict = target_dict.eos_index
_UpperCamelCase : Optional[Any] = len(target_dict.symbols )
_UpperCamelCase : Tuple = os.path.join(__A , 'vocab.json' )
if not os.path.isdir(__A ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(__A ) )
return
os.makedirs(__A , exist_ok=__A )
with open(__A , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , __A )
_UpperCamelCase : List[Any] = WavaVecaCTCTokenizer(
__A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=__A , )
_UpperCamelCase : Union[str, Any] = WavaVecaProcessor(feature_extractor=__A , tokenizer=__A )
processor.save_pretrained(__A )
_UpperCamelCase : Tuple = SEWForCTC(__A )
else:
_UpperCamelCase : Any = SEWModel(__A )
feature_extractor.save_pretrained(__A )
recursively_load_weights(__A , __A , __A )
hf_model.save_pretrained(__A )
if __name__ == "__main__":
snake_case_ : int = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
snake_case_ : Union[str, Any] = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
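# The recursive-set helper above resolves a dotted key attribute by
# attribute before assigning the tensor. A self-contained sketch of that
# walk (the name `_resolve_dotted` is illustrative, not part of the script):
def _resolve_dotted(obj, dotted_key):
    # e.g. "feature_projection.layer_norm.weight" is followed one step at a time
    for attr in dotted_key.split("."):
        obj = getattr(obj, attr)
    return obj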
| 83
|
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowercase : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowercase : list[int] = [ord(letter) for letter in string.ascii_lowercase]
lowercase : set[int] = {ord(char) for char in VALID_CHARS}
lowercase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> str | None:
_snake_case = ""
_snake_case = 42
_snake_case = 42
_snake_case = 42
for keychar, cipherchar in zip(cycle(__A ) , __A ):
_snake_case = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(__A )
return decoded
def SCREAMING_SNAKE_CASE__ ( __A ) -> list[str]:
_snake_case = []
for key in product(__A , repeat=3 ):
_snake_case = try_key(__A , __A )
if encoded is not None:
possibles.append(__A )
return possibles
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> list[str]:
return [possible for possible in possibles if common_word in possible.lower()]
def SCREAMING_SNAKE_CASE__ ( __A = "p059_cipher.txt" ) -> int:
_snake_case = 42
_snake_case = 42
_snake_case = 42
_snake_case = 42
_snake_case = Path(__A ).parent.joinpath(__A ).read_text(encoding='utf-8' )
_snake_case = [int(__A ) for number in data.strip().split(',' )]
_snake_case = filter_valid_chars(__A )
for common_word in COMMON_WORDS:
_snake_case = filter_common_word(__A , __A )
if len(__A ) == 1:
break
_snake_case = possibles[0]
return sum(ord(__A ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
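# The core idea above in miniature: XOR is its own inverse, so repeating the
# short key with itertools.cycle undoes the encryption, since (p ^ k) ^ k == p.
# An illustrative round trip (the names here are not part of the solution):
#
#   >>> key = [ord(c) for c in "abc"]
#   >>> cipher = [p ^ k for p, k in zip((ord(c) for c in "hello"), cycle(key))]
#   >>> "".join(chr(c ^ k) for c, k in zip(cipher, cycle(key)))
#   'hello'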
| 42
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __A (unittest.TestCase):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : str=30 , UpperCAmelCase_ : Union[str, Any]=400 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Any=0.9 , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : str=[0.5, 0.5, 0.5] , UpperCAmelCase_ : Optional[int]=[0.5, 0.5, 0.5] , ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = size if size is not None else {"""shortest_edge""": 30}
snake_case_ = crop_size if crop_size is not None else {"""height""": 30, """width""": 30}
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = num_channels
snake_case_ = min_resolution
snake_case_ = max_resolution
snake_case_ = do_resize_and_center_crop
snake_case_ = size
snake_case_ = crop_pct
snake_case_ = crop_size
snake_case_ = do_normalize
snake_case_ = image_mean
snake_case_ = image_std
def lowerCAmelCase ( self : int ) ->Tuple:
"""simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __A (_lowerCamelCase , unittest.TestCase):
'''simple docstring'''
__lowercase: Union[str, Any] = PoolFormerImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
snake_case_ = PoolFormerImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Union[str, Any] ) ->Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Optional[int] ) ->int:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , """do_resize_and_center_crop""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """size""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """crop_pct""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """image_std""" ) )
def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 30} )
self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} )
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def lowerCAmelCase ( self : Optional[int] ) ->int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , Image.Image )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ = image_processing(lowerCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase ( self : Union[str, Any] ) ->Any:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , numpify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , np.ndarray )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ = image_processing(lowerCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , torch.Tensor )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ = image_processing(lowerCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 347
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( __A = 1_000_000 ) -> int:
_snake_case = limit + 1
_snake_case = [0] * limit
for first_term in range(1 , __A ):
for n in range(__A , __A , __A ):
_snake_case = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also a < 4d
_snake_case = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(F'''{solution() = }''')
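# Derivation behind the loops above: write the arithmetic progression as
# x = a + d, y = a, z = a - d. Then
#     n = x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = a * (4*d - a),
# so for each first term a and each multiple n of a, the candidate
# d = (n / a + a) / 4 must be an integer (the "% 4" test), with a > d so
# that z > 0 and a < 4*d so that n > 0; these are exactly the two range
# checks performed above.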
| 42
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 87
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase : Tuple = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : int = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Tuple = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a__ = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 317
|
'''simple docstring'''
from collections import defaultdict
from math import gcd
def SCREAMING_SNAKE_CASE__ ( __A = 1_500_000 ) -> int:
_snake_case = defaultdict(__A )
_snake_case = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , __A , 2 ):
if gcd(__A , __A ) > 1:
continue
_snake_case = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(__A , limit + 1 , __A ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F'''{solution() = }''')
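# Euclid's formula underpins the loop above: for coprime m > n of opposite
# parity, (m**2 - n**2, 2*m*n, m**2 + n**2) is a primitive Pythagorean triple
# whose perimeter is 2*m*(m + n); the inner range then counts every multiple
# of that primitive perimeter. For example m = 2, n = 1 yields the (3, 4, 5)
# triple with perimeter 12 = 2 * 2 * (2 + 1).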
| 42
| 0
|
from __future__ import annotations
from collections.abc import MutableSequence
class _A :
def __init__( self : List[str] , _A : str , _A : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if len(lowerCAmelCase_ ) != degree + 1:
raise ValueError(
'''The number of coefficients should be equal to the degree + 1.''' )
lowercase : List[Any] = list(lowerCAmelCase_ )
lowercase : Any = degree
def __add__( self : Optional[int] , _A : Dict ) -> Any:
"""simple docstring"""
if self.degree > polynomial_a.degree:
lowercase : Any = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , lowerCAmelCase_ )
else:
lowercase : int = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , lowerCAmelCase_ )
def __sub__( self : Tuple , _A : int ) -> int:
"""simple docstring"""
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self : Dict ) -> List[str]:
"""simple docstring"""
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self : Tuple , _A : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[Any] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , lowerCAmelCase_ )
def __a ( self : Union[str, Any] , _A : int ) -> List[Any]:
"""simple docstring"""
lowercase : List[str] = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : Any ) -> str:
"""simple docstring"""
lowercase : Any = ''''''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(lowerCAmelCase_ )
return polynomial
def __repr__( self : List[str] ) -> List[Any]:
"""simple docstring"""
return self.__str__()
def __a ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[Any] = [0] * self.degree
for i in range(self.degree ):
lowercase : Optional[Any] = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , lowerCAmelCase_ )
def __a ( self : str , _A : List[str] = 0 ) -> str:
"""simple docstring"""
lowercase : Optional[Any] = [0] * (self.degree + 2)
lowercase : Optional[int] = constant
for i in range(self.degree + 1 ):
lowercase : Tuple = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , lowerCAmelCase_ )
def __eq__( self : Any , _A : Dict ) -> List[str]:
"""simple docstring"""
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : Union[str, Any] , _A : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return not self.__eq__(lowerCAmelCase_ )
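# Intended usage of the class above, as a sketch. Identifiers in this dump
# are mangled, so `Polynomial` and `derivative` below stand in for the class
# and its derivative method as originally named; coefficients are listed by
# ascending power:
#
#   >>> p = Polynomial(2, [1, 0, 3])   # represents 3x^2 + 1
#   >>> str(p)
#   '3x^2 + 1'
#   >>> str(p.derivative())            # d/dx (3x^2 + 1) = 6x
#   '6x'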
| 308
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowercase : Optional[Any] = False
class __UpperCAmelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_snake_case = 'A painting of a squirrel eating a burger '
_snake_case = torch.manual_seed(0 )
_snake_case = pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase_ )
_snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_snake_case = generator.manual_seed(0 )
_snake_case = pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(
'shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_snake_case = 'A painting of a squirrel eating a burger '
_snake_case = torch.manual_seed(0 )
_snake_case = pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
_snake_case = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_snake_case = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 42
| 0
|
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = torch.nn.Linear(10 , 10 )
lowerCamelCase = torch.optim.SGD(model.parameters() , 0.1 )
lowerCamelCase = Accelerator()
lowerCamelCase = accelerator.prepare(lowerCAmelCase_ )
try:
pickle.loads(pickle.dumps(lowerCAmelCase_ ) )
except Exception as e:
self.fail(f'Accelerated optimizer pickling failed with {e}' )
AcceleratorState._reset_state()
| 291
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( __A = 100 ) -> int:
_snake_case = n * (n + 1) * (2 * n + 1) / 6
_snake_case = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'''{solution() = }''')
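# Closed forms used above: the sum of 1..n is n(n + 1)/2 and the sum of
# squares is n(n + 1)(2n + 1)/6. For n = 10 the square of the sum is
# 55**2 = 3025, the sum of the squares is 385, and the difference is 2640,
# matching the worked example in the original problem statement.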
| 42
| 0
|
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
a : Any = 5_00_00
a : str = 50_00
a : List[Any] = os.path.split(__file__)
a : List[str] = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
for i in range(__A ):
UpperCAmelCase : Any = dataset[i]
@get_duration
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
for i in range(0 , len(__A ) , __A ):
UpperCAmelCase : Optional[Any] = dataset[i : i + batch_size]
@get_duration
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
with dataset.formatted_as(type=__A ):
for i in range(__A ):
UpperCAmelCase : Optional[Any] = dataset[i]
@get_duration
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
with dataset.formatted_as(type=__A ):
for i in range(0 , __A , __A ):
UpperCAmelCase : Any = dataset[i : i + batch_size]
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : Any = {"num examples": SPEED_TEST_N_EXAMPLES}
UpperCAmelCase : List[Any] = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
UpperCAmelCase : List[str] = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
UpperCAmelCase : int = datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
UpperCAmelCase : Optional[int] = generate_example_dataset(
os.path.join(__A , "dataset.arrow" ) , __A , num_examples=__A , seq_shapes={"list": (100,)} , )
print("first set of iterations" )
for func, kwargs in functions:
print(func.__name__ , str(__A ) )
UpperCAmelCase : Any = func(__A , **__A )
print("shuffling dataset" )
UpperCAmelCase : Tuple = dataset.shuffle()
print("Second set of iterations (after shuffling" )
for func, kwargs in functions_shuffled:
print("shuffled " , func.__name__ , str(__A ) )
UpperCAmelCase : Union[str, Any] = func(
__A , **__A )
with open(__A , "wb" ) as f:
f.write(json.dumps(__A ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 311
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowercase : str = logging.get_logger(__name__)
lowercase : Union[str, Any] = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A , __A , __A ) -> Dict:
for attribute in key.split('.' ):
_snake_case = getattr(__A , __A )
if weight_type is not None:
_snake_case = getattr(__A , __A ).shape
else:
_snake_case = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
_snake_case = value
elif weight_type == "weight_g":
_snake_case = value
elif weight_type == "weight_v":
_snake_case = value
elif weight_type == "bias":
_snake_case = value
else:
_snake_case = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A ) -> Any:
_snake_case = []
_snake_case = fairseq_model.state_dict()
_snake_case = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_snake_case = False
if "conv_layers" in name:
load_conv_layer(
__A , __A , __A , __A , hf_model.config.feat_extract_norm == 'group' , )
_snake_case = True
else:
for key, mapped_key in MAPPING.items():
_snake_case = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
_snake_case = True
if "*" in mapped_key:
_snake_case = name.split(__A )[0].split('.' )[-2]
_snake_case = mapped_key.replace('*' , __A )
if "weight_g" in name:
_snake_case = 'weight_g'
elif "weight_v" in name:
_snake_case = 'weight_v'
elif "weight" in name:
_snake_case = 'weight'
elif "bias" in name:
_snake_case = 'bias'
else:
_snake_case = None
set_recursively(__A , __A , __A , __A , __A )
continue
if not is_used:
unused_weights.append(__A )
logger.warning(F'Unused weights: {unused_weights}' )
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A , __A , __A ) -> int:
_snake_case = full_name.split('conv_layers.' )[-1]
_snake_case = name.split('.' )
_snake_case = int(items[0] )
_snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
_snake_case = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
_snake_case = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'
" found."
)
_snake_case = value
            logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
)
_snake_case = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__A )
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> str:
_snake_case = SEWConfig()
if is_finetuned:
_snake_case = model.wav_encoder.wav_model.cfg
else:
_snake_case = model.cfg
_snake_case = fs_config.conv_bias
_snake_case = eval(fs_config.conv_feature_layers )
_snake_case = [x[0] for x in conv_layers]
_snake_case = [x[1] for x in conv_layers]
_snake_case = [x[2] for x in conv_layers]
_snake_case = 'gelu'
_snake_case = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group'
_snake_case = 0.0
_snake_case = fs_config.activation_fn.name
_snake_case = fs_config.encoder_embed_dim
_snake_case = 0.0_2
_snake_case = fs_config.encoder_ffn_embed_dim
_snake_case = 1e-5
_snake_case = fs_config.encoder_layerdrop
_snake_case = fs_config.encoder_attention_heads
_snake_case = fs_config.conv_pos_groups
_snake_case = fs_config.conv_pos
_snake_case = len(__A )
_snake_case = fs_config.encoder_layers
_snake_case = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
_snake_case = model.cfg
_snake_case = fs_config.final_dropout
_snake_case = fs_config.layerdrop
_snake_case = fs_config.activation_dropout
_snake_case = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
_snake_case = fs_config.attention_dropout
_snake_case = fs_config.dropout_input
_snake_case = fs_config.dropout
_snake_case = fs_config.mask_channel_length
_snake_case = fs_config.mask_channel_prob
_snake_case = fs_config.mask_length
_snake_case = fs_config.mask_prob
_snake_case = 'Wav2Vec2FeatureExtractor'
_snake_case = 'Wav2Vec2CTCTokenizer'
return config
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A=None , __A=None , __A=True ) -> List[str]:
if is_finetuned:
_snake_case , _snake_case , _snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
_snake_case , _snake_case , _snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
_snake_case = SEWConfig.from_pretrained(__A )
else:
_snake_case = convert_config(model[0] , __A )
_snake_case = model[0].eval()
_snake_case = True if config.feat_extract_norm == 'layer' else False
_snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=__A , return_attention_mask=__A , )
if is_finetuned:
if dict_path:
_snake_case = Dictionary.load(__A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_snake_case = target_dict.pad_index
_snake_case = target_dict.bos_index
_snake_case = target_dict.pad_index
_snake_case = target_dict.bos_index
_snake_case = target_dict.eos_index
_snake_case = len(target_dict.symbols )
_snake_case = os.path.join(__A , 'vocab.json' )
if not os.path.isdir(__A ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(__A ) )
return
os.makedirs(__A , exist_ok=__A )
with open(__A , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , __A )
_snake_case = WavaVecaCTCTokenizer(
__A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=__A , )
_snake_case = WavaVecaProcessor(feature_extractor=__A , tokenizer=__A )
processor.save_pretrained(__A )
_snake_case = SEWForCTC(__A )
else:
_snake_case = SEWModel(__A )
feature_extractor.save_pretrained(__A )
recursively_load_weights(__A , __A , __A )
hf_model.save_pretrained(__A )
if __name__ == "__main__":
lowercase : int = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
lowercase : Union[str, Any] = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 42
| 0
|
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
lowerCAmelCase : Tuple = []
lowerCAmelCase : Tuple = []
lowerCAmelCase : Any = {
"^": 3,
"*": 2,
"/": 2,
"%": 2,
"+": 1,
"-": 1,
} # Priority of each operator
lowerCAmelCase : int = len(__A ) if (len(__A ) > 7) else 7
# Print table header for output
print(
"Symbol".center(8 ) , "Stack".center(__A ) , "Postfix".center(__A ) , sep=" | " , )
print("-" * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(__A ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(__A ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(__A ) == 0:
stack.append(__A ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(__A ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(__A ) # push x to stack
print(
x.center(8 ) , ("".join(__A )).ljust(__A ) , ("".join(__A )).ljust(__A ) , sep=" | " , ) # Output in tabular format
while len(__A ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
" ".center(8 ) , ("".join(__A )).ljust(__A ) , ("".join(__A )).ljust(__A ) , sep=" | " , ) # Output in tabular format
return "".join(__A ) # return Postfix as str
def a__ ( SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = list(infix[::-1] ) # reverse the infix equation
for i in range(len(__A ) ):
if infix[i] == "(":
lowerCAmelCase : Dict = ")" # change "(" to ")"
elif infix[i] == ")":
lowerCAmelCase : str = "(" # change ")" to "("
return (infix_2_postfix("".join(__A ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
lowerCAmelCase__ = input('''\nEnter an Infix Equation = ''') # Input an Infix equation
lowerCAmelCase__ = "".join(Infix.split()) # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
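# Worked example of the reverse, convert, reverse trick above for "a+b*c":
#   reverse the infix      -> "c*b+a"   (parentheses, if any, are swapped)
#   infix to postfix       -> "cb*a+"   ("*" binds tighter than "+")
#   reverse the postfix    -> "+a*bc"   which is the prefix form of a+b*c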
| 108
|
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : int = logging.get_logger(__name__)
lowercase : Union[str, Any] = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class __UpperCAmelCase ( _lowerCamelCase ):
__lowercase = """xlnet"""
__lowercase = ["""mems"""]
__lowercase = {
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowerCAmelCase_=3_20_00 , lowerCAmelCase_=10_24 , lowerCAmelCase_=24 , lowerCAmelCase_=16 , lowerCAmelCase_=40_96 , lowerCAmelCase_="gelu" , lowerCAmelCase_=True , lowerCAmelCase_="bi" , lowerCAmelCase_=0.02 , lowerCAmelCase_=1E-12 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=-1 , lowerCAmelCase_=False , lowerCAmelCase_="last" , lowerCAmelCase_=True , lowerCAmelCase_="tanh" , lowerCAmelCase_=0.1 , lowerCAmelCase_=5 , lowerCAmelCase_=5 , lowerCAmelCase_=5 , lowerCAmelCase_=1 , lowerCAmelCase_=2 , **lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = vocab_size
_snake_case = d_model
_snake_case = n_layer
_snake_case = n_head
if d_model % n_head != 0:
raise ValueError(F'\'d_model % n_head\' ({d_model % n_head}) should be equal to 0' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F'`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})' )
_snake_case = d_model // n_head
_snake_case = ff_activation
_snake_case = d_inner
_snake_case = untie_r
_snake_case = attn_type
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = dropout
_snake_case = mem_len
_snake_case = reuse_len
_snake_case = bi_data
_snake_case = clamp_len
_snake_case = same_length
_snake_case = summary_type
_snake_case = summary_use_proj
_snake_case = summary_activation
_snake_case = summary_last_dropout
_snake_case = start_n_top
_snake_case = end_n_top
_snake_case = bos_token_id
_snake_case = pad_token_id
_snake_case = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
' instead.' , lowerCAmelCase_ , )
_snake_case = kwargs['use_cache']
_snake_case = use_mems_eval
_snake_case = use_mems_train
super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def lowerCamelCase ( self ):
"""simple docstring"""
logger.info(F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
return -1
@max_position_embeddings.setter
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
raise NotImplementedError(
F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
| 42
| 0
|
"""simple docstring"""
import math
def _snake_case ( _snake_case : Optional[int] ) -> list:
'''simple docstring'''
_A = [True] * n
_A = False
_A = False
_A = True
for i in range(3 , int(n**0.5 + 1 ) , 2 ):
_A = i * 2
while index < n:
_A = False
_A = index + i
_A = [2]
for i in range(3 , __A , 2 ):
if is_prime[i]:
primes.append(__A )
return primes
def _snake_case ( _snake_case : Optional[Any] = 99_99_66_66_33_33 ) -> int:
'''simple docstring'''
_A = math.floor(math.sqrt(__A ) ) + 1_00
_A = prime_sieve(__A )
_A = 0
_A = 0
_A = primes[prime_index]
while (last_prime**2) <= limit:
_A = primes[prime_index + 1]
_A = last_prime**2
_A = next_prime**2
# Get numbers divisible by lps(current)
_A = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
_A = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
_A = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
_A = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
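# Quick check of the sieve helper above (both functions share a mangled name
# in this dump): for n = 10 the outer loop walks only odd candidates, marks
# the odd composite 9, and the result is [2, 3, 5, 7] with 2 added up front.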
| 315
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' )
_snake_case = tf.convert_to_tensor(
            [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , )  # "J'aime le camembert !"
_snake_case = model(lowerCAmelCase_ )['last_hidden_state']
_snake_case = tf.TensorShape((1, 10, 7_68) )
self.assertEqual(output.shape , lowerCAmelCase_ )
# compare the actual values for a slice.
_snake_case = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 42
| 0
|
__snake_case = "Input must be a string of 8 numbers plus letter"
__snake_case = "TRWAGMYFPDXBNJZSQVHLCKE"
def _A ( _lowercase ) -> bool:
"""simple docstring"""
if not isinstance(__A , __A ):
__UpperCamelCase = f'''Expected string as input, found {type(__A ).__name__}'''
raise TypeError(__A )
__UpperCamelCase = spanish_id.replace('-' , '' ).upper()
if len(__A ) != 9:
raise ValueError(__A )
try:
__UpperCamelCase = int(spanish_id_clean[0:8] )
__UpperCamelCase = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__A ) from ex
if letter.isdigit():
raise ValueError(__A )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
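# The check letter works modulo 23: the eight leading digits are reduced
# mod 23 and looked up in "TRWAGMYFPDXBNJZSQVHLCKE". For example,
# 12345678 % 23 == 14 and index 14 of the lookup string is "Z", so the
# ID "12345678Z" validates.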
| 310
|
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 42
| 0
|
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
__UpperCamelCase : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
__UpperCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase]
__UpperCamelCase : set[int] = {ord(char) for char in VALID_CHARS}
__UpperCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def A ( _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : List[str] = ''''''
SCREAMING_SNAKE_CASE : List[Any] = 42
SCREAMING_SNAKE_CASE : List[str] = 42
SCREAMING_SNAKE_CASE : Tuple = 42
for keychar, cipherchar in zip(cycle(__A ) , __A ):
SCREAMING_SNAKE_CASE : int = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(__A )
return decoded
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Tuple = []
for key in product(__A , repeat=3 ):
SCREAMING_SNAKE_CASE : Any = try_key(__A , __A )
if encoded is not None:
possibles.append(__A )
return possibles
def A ( _lowercase , _lowercase ):
return [possible for possible in possibles if common_word in possible.lower()]
def A ( _lowercase = "p059_cipher.txt" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = 42
SCREAMING_SNAKE_CASE : Any = 42
SCREAMING_SNAKE_CASE : Optional[int] = 42
SCREAMING_SNAKE_CASE : Optional[int] = 42
SCREAMING_SNAKE_CASE : Optional[Any] = Path(__A ).parent.joinpath(__A ).read_text(encoding='''utf-8''' )
SCREAMING_SNAKE_CASE : Optional[int] = [int(__A ) for number in data.strip().split(''',''' )]
SCREAMING_SNAKE_CASE : List[str] = filter_valid_chars(__A )
for common_word in COMMON_WORDS:
SCREAMING_SNAKE_CASE : Tuple = filter_common_word(__A , __A )
if len(__A ) == 1:
break
SCREAMING_SNAKE_CASE : str = possibles[0]
return sum(ord(__A ) for char in decoded_text )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 182
|
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowercase : List[str] = logging.get_logger("transformers.models.speecht5")
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A ) -> Dict:
hf_model.apply_weight_norm()
_snake_case = checkpoint['input_conv.weight_g']
_snake_case = checkpoint['input_conv.weight_v']
_snake_case = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
_snake_case = checkpoint[F'upsamples.{i}.1.weight_g']
_snake_case = checkpoint[F'upsamples.{i}.1.weight_v']
_snake_case = checkpoint[F'upsamples.{i}.1.bias']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
_snake_case = checkpoint[F'blocks.{i}.convs1.{j}.1.weight_g']
_snake_case = checkpoint[F'blocks.{i}.convs1.{j}.1.weight_v']
_snake_case = checkpoint[F'blocks.{i}.convs1.{j}.1.bias']
_snake_case = checkpoint[F'blocks.{i}.convs2.{j}.1.weight_g']
_snake_case = checkpoint[F'blocks.{i}.convs2.{j}.1.weight_v']
_snake_case = checkpoint[F'blocks.{i}.convs2.{j}.1.bias']
_snake_case = checkpoint['output_conv.1.weight_g']
_snake_case = checkpoint['output_conv.1.weight_v']
_snake_case = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A , __A=None , __A=None , ) -> List[Any]:
if config_path is not None:
_snake_case = SpeechTaHifiGanConfig.from_pretrained(__A )
else:
_snake_case = SpeechTaHifiGanConfig()
_snake_case = SpeechTaHifiGan(__A )
_snake_case = torch.load(__A )
load_weights(orig_checkpoint['model']['generator'] , __A , __A )
_snake_case = np.load(__A )
_snake_case = stats[0].reshape(-1 )
_snake_case = stats[1].reshape(-1 )
_snake_case = torch.from_numpy(__A ).float()
_snake_case = torch.from_numpy(__A ).float()
model.save_pretrained(__A )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(__A )
if __name__ == "__main__":
lowercase : Dict = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
lowercase : List[Any] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
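# The loader above toggles weight normalization because HiFi-GAN checkpoints store each
# convolution as a magnitude tensor `weight_g` and a direction tensor `weight_v`. A
# minimal standalone sketch of that reparameterization (layer sizes are illustrative,
# not taken from the checkpoint):
import torch.nn as nn
from torch.nn.utils import weight_norm, remove_weight_norm
conv = weight_norm(nn.Conv1d(4, 8, kernel_size=3))
print(sorted(name for name, _ in conv.named_parameters()))  # ['bias', 'weight_g', 'weight_v']
remove_weight_norm(conv)  # fuses weight_g / weight_v back into a plain `weight`
print(sorted(name for name, _ in conv.named_parameters()))  # ['bias', 'weight']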
| 42
| 0
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A__ ( UpperCAmelCase_ ):
# vision encoder
if "img_encoder.pos_embed" in name:
_UpperCamelCase : Union[str, Any] = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
if "img_encoder.patch_embed.proj" in name:
_UpperCamelCase : int = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
if "img_encoder.patch_embed.norm" in name:
_UpperCamelCase : Tuple = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
if "img_encoder.layers" in name:
_UpperCamelCase : List[str] = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
if "blocks" in name and "res" not in name:
_UpperCamelCase : List[str] = name.replace('blocks' , 'layers' )
if "attn" in name and "pre_assign" not in name:
_UpperCamelCase : Tuple = name.replace('attn' , 'self_attn' )
if "proj" in name and "self_attn" in name and "text" not in name:
_UpperCamelCase : Optional[Any] = name.replace('proj' , 'out_proj' )
if "pre_assign_attn.attn.proj" in name:
_UpperCamelCase : List[str] = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
if "norm1" in name:
_UpperCamelCase : Optional[int] = name.replace('norm1' , 'layer_norm1' )
if "norm2" in name and "pre_assign" not in name:
_UpperCamelCase : int = name.replace('norm2' , 'layer_norm2' )
if "img_encoder.norm" in name:
_UpperCamelCase : List[str] = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
# text encoder
if "text_encoder.token_embedding" in name:
_UpperCamelCase : str = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
if "text_encoder.positional_embedding" in name:
_UpperCamelCase : int = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "text_encoder.transformer.resblocks." in name:
_UpperCamelCase : Dict = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
if "ln_1" in name:
_UpperCamelCase : List[str] = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
_UpperCamelCase : List[str] = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
_UpperCamelCase : List[Any] = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
_UpperCamelCase : List[Any] = name.replace('c_proj' , 'fc2' )
if "text_encoder" in name:
_UpperCamelCase : List[Any] = name.replace('text_encoder' , 'text_model' )
if "ln_final" in name:
_UpperCamelCase : str = name.replace('ln_final' , 'final_layer_norm' )
# projection layers
if "img_projector.linear_hidden." in name:
_UpperCamelCase : Any = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
if "img_projector.linear_out." in name:
_UpperCamelCase : Any = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
if "text_projector.linear_hidden" in name:
_UpperCamelCase : Any = name.replace('text_projector.linear_hidden' , 'text_projection' )
if "text_projector.linear_out" in name:
_UpperCamelCase : List[str] = name.replace('text_projector.linear_out' , 'text_projection.3' )
return name
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ ):
for key in orig_state_dict.copy().keys():
_UpperCamelCase : Any = orig_state_dict.pop(__A )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_UpperCamelCase : Optional[int] = key.split('.' )
_UpperCamelCase , _UpperCamelCase : Optional[Any] = int(key_split[2] ), int(key_split[4] )
_UpperCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
_UpperCamelCase : List[str] = val[:dim, :]
_UpperCamelCase : Union[str, Any] = val[dim : dim * 2, :]
_UpperCamelCase : List[str] = val[-dim:, :]
else:
_UpperCamelCase : int = val[:dim]
_UpperCamelCase : str = val[dim : dim * 2]
_UpperCamelCase : Dict = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_UpperCamelCase : Dict = key.split('.' )
_UpperCamelCase : List[Any] = int(key_split[3] )
_UpperCamelCase : str = config.text_config.hidden_size
if "weight" in key:
_UpperCamelCase : Union[str, Any] = val[:dim, :]
_UpperCamelCase : str = val[
dim : dim * 2, :
]
_UpperCamelCase : Optional[Any] = val[-dim:, :]
else:
_UpperCamelCase : Optional[Any] = val[:dim]
_UpperCamelCase : Dict = val[dim : dim * 2]
_UpperCamelCase : int = val[-dim:]
else:
_UpperCamelCase : Dict = rename_key(__A )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_UpperCamelCase : Optional[int] = val.squeeze_()
else:
_UpperCamelCase : Any = val
return orig_state_dict
def A__ ( ):
_UpperCamelCase : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_UpperCamelCase : Tuple = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_="groupvit-gcc-yfcc" , UpperCAmelCase_=False ):
_UpperCamelCase : List[str] = GroupViTConfig()
_UpperCamelCase : List[Any] = GroupViTModel(__A ).eval()
_UpperCamelCase : int = torch.load(__A , map_location='cpu' )['model']
_UpperCamelCase : Tuple = convert_state_dict(__A , __A )
_UpperCamelCase , _UpperCamelCase : Dict = model.load_state_dict(__A , strict=__A )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(__A ) == 0)
# verify result
_UpperCamelCase : Union[str, Any] = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
_UpperCamelCase : Any = prepare_img()
_UpperCamelCase : Optional[int] = processor(text=['a photo of a cat', 'a photo of a dog'] , images=__A , padding=__A , return_tensors='pt' )
with torch.no_grad():
_UpperCamelCase : Optional[int] = model(**__A )
if model_name == "groupvit-gcc-yfcc":
_UpperCamelCase : Any = torch.tensor([[1_3.3_5_2_3, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
_UpperCamelCase : Optional[int] = torch.tensor([[1_6.1_8_7_3, 8.6_230]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , __A , atol=1E-3 )
processor.save_pretrained(__A )
model.save_pretrained(__A )
print('Successfully saved processor and model to' , __A )
if push_to_hub:
print('Pushing to the hub...' )
processor.push_to_hub(__A , organization='nielsr' )
model.push_to_hub(__A , organization='nielsr' )
if __name__ == "__main__":
snake_case_ : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
snake_case_ : int = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
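# The qkv / in_proj handling above slices one fused projection matrix into separate
# query, key and value tensors. A minimal self-contained sketch of that split (the
# dimension here is illustrative):
import torch
dim = 8
fused_weight = torch.randn(3 * dim, dim)  # rows stacked as [q; k; v]
q_w = fused_weight[:dim, :]
k_w = fused_weight[dim : dim * 2, :]
v_w = fused_weight[-dim:, :]
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), fused_weight)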
| 83
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __UpperCAmelCase ( _lowerCamelCase ):
__lowercase = 42
class __UpperCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase_=3 , lowerCAmelCase_=3 , lowerCAmelCase_=("DownEncoderBlock2D",) , lowerCAmelCase_=(64,) , lowerCAmelCase_=2 , lowerCAmelCase_=32 , lowerCAmelCase_="silu" , lowerCAmelCase_=True , ):
"""simple docstring"""
super().__init__()
_snake_case = layers_per_block
_snake_case = torch.nn.Convad(
lowerCAmelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_snake_case = None
_snake_case = nn.ModuleList([] )
# down
_snake_case = block_out_channels[0]
for i, down_block_type in enumerate(lowerCAmelCase_ ):
_snake_case = output_channel
_snake_case = block_out_channels[i]
_snake_case = i == len(lowerCAmelCase_ ) - 1
_snake_case = get_down_block(
lowerCAmelCase_ , num_layers=self.layers_per_block , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=lowerCAmelCase_ , resnet_groups=lowerCAmelCase_ , attention_head_dim=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , )
self.down_blocks.append(lowerCAmelCase_ )
# mid
_snake_case = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , )
# out
_snake_case = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCAmelCase_ , eps=1E-6 )
_snake_case = nn.SiLU()
_snake_case = 2 * out_channels if double_z else out_channels
_snake_case = nn.Convad(block_out_channels[-1] , lowerCAmelCase_ , 3 , padding=1 )
_snake_case = False
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = x
_snake_case = self.conv_in(lowerCAmelCase_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCAmelCase_ ):
def custom_forward(*lowerCAmelCase_ ):
return module(*lowerCAmelCase_ )
return custom_forward
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
_snake_case = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
# middle
_snake_case = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
else:
for down_block in self.down_blocks:
_snake_case = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ )
# middle
_snake_case = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCAmelCase_ )
else:
# down
for down_block in self.down_blocks:
_snake_case = down_block(lowerCAmelCase_ )
# middle
_snake_case = self.mid_block(lowerCAmelCase_ )
# post-process
_snake_case = self.conv_norm_out(lowerCAmelCase_ )
_snake_case = self.conv_act(lowerCAmelCase_ )
_snake_case = self.conv_out(lowerCAmelCase_ )
return sample
class __UpperCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase_=3 , lowerCAmelCase_=3 , lowerCAmelCase_=("UpDecoderBlock2D",) , lowerCAmelCase_=(64,) , lowerCAmelCase_=2 , lowerCAmelCase_=32 , lowerCAmelCase_="silu" , lowerCAmelCase_="group" , ):
"""simple docstring"""
super().__init__()
_snake_case = layers_per_block
_snake_case = nn.Convad(
lowerCAmelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_snake_case = None
_snake_case = nn.ModuleList([] )
_snake_case = in_channels if norm_type == 'spatial' else None
# mid
_snake_case = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , )
# up
_snake_case = list(reversed(lowerCAmelCase_ ) )
_snake_case = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCAmelCase_ ):
_snake_case = output_channel
_snake_case = reversed_block_out_channels[i]
_snake_case = i == len(lowerCAmelCase_ ) - 1
_snake_case = get_up_block(
lowerCAmelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , prev_output_channel=lowerCAmelCase_ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase_ , resnet_groups=lowerCAmelCase_ , attention_head_dim=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , resnet_time_scale_shift=lowerCAmelCase_ , )
self.up_blocks.append(lowerCAmelCase_ )
_snake_case = output_channel
# out
if norm_type == "spatial":
_snake_case = SpatialNorm(block_out_channels[0] , lowerCAmelCase_ )
else:
_snake_case = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCAmelCase_ , eps=1E-6 )
_snake_case = nn.SiLU()
_snake_case = nn.Convad(block_out_channels[0] , lowerCAmelCase_ , 3 , padding=1 )
_snake_case = False
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ):
"""simple docstring"""
_snake_case = z
_snake_case = self.conv_in(lowerCAmelCase_ )
_snake_case = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCAmelCase_ ):
def custom_forward(*lowerCAmelCase_ ):
return module(*lowerCAmelCase_ )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
_snake_case = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase_ , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
_snake_case = sample.to(lowerCAmelCase_ )
# up
for up_block in self.up_blocks:
_snake_case = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
else:
# middle
_snake_case = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = sample.to(lowerCAmelCase_ )
# up
for up_block in self.up_blocks:
_snake_case = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , lowerCAmelCase_ )
else:
# middle
_snake_case = self.mid_block(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = sample.to(lowerCAmelCase_ )
# up
for up_block in self.up_blocks:
_snake_case = up_block(lowerCAmelCase_ , lowerCAmelCase_ )
# post-process
if latent_embeds is None:
_snake_case = self.conv_norm_out(lowerCAmelCase_ )
else:
_snake_case = self.conv_norm_out(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.conv_act(lowerCAmelCase_ )
_snake_case = self.conv_out(lowerCAmelCase_ )
return sample
class __UpperCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_="random" , lowerCAmelCase_=False , lowerCAmelCase_=True ):
"""simple docstring"""
super().__init__()
_snake_case = n_e
_snake_case = vq_embed_dim
_snake_case = beta
_snake_case = legacy
_snake_case = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_snake_case = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
_snake_case = self.used.shape[0]
_snake_case = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_snake_case = self.re_embed
_snake_case = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
_snake_case = n_e
_snake_case = sane_index_shape
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = inds.shape
assert len(lowerCAmelCase_ ) > 1
_snake_case = inds.reshape(ishape[0] , -1 )
_snake_case = self.used.to(lowerCAmelCase_ )
_snake_case = (inds[:, :, None] == used[None, None, ...]).long()
_snake_case = match.argmax(-1 )
_snake_case = match.sum(2 ) < 1
if self.unknown_index == "random":
_snake_case = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_snake_case = self.unknown_index
return new.reshape(lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = inds.shape
assert len(lowerCAmelCase_ ) > 1
_snake_case = inds.reshape(ishape[0] , -1 )
_snake_case = self.used.to(lowerCAmelCase_ )
if self.re_embed > self.used.shape[0]: # extra token
_snake_case = 0 # simply set to zero
_snake_case = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCAmelCase_ )
return back.reshape(lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = z.permute(0 , 2 , 3 , 1 ).contiguous()
_snake_case = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_snake_case = torch.argmin(torch.cdist(lowerCAmelCase_ , self.embedding.weight ) , dim=1 )
_snake_case = self.embedding(lowerCAmelCase_ ).view(z.shape )
_snake_case = None
_snake_case = None
# compute loss for embedding
if not self.legacy:
_snake_case = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_snake_case = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_snake_case = z + (z_q - z).detach()
# reshape back to match original input shape
_snake_case = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_snake_case = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_snake_case = self.remap_to_used(lowerCAmelCase_ )
_snake_case = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_snake_case = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if self.remap is not None:
_snake_case = indices.reshape(shape[0] , -1 ) # add batch axis
_snake_case = self.unmap_to_all(lowerCAmelCase_ )
_snake_case = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_snake_case = self.embedding(lowerCAmelCase_ )
if shape is not None:
_snake_case = z_q.view(lowerCAmelCase_ )
# reshape back to match original input shape
_snake_case = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __UpperCAmelCase ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=False ):
"""simple docstring"""
_snake_case = parameters
_snake_case , _snake_case = torch.chunk(lowerCAmelCase_ , 2 , dim=1 )
_snake_case = torch.clamp(self.logvar , -30.0 , 20.0 )
_snake_case = deterministic
_snake_case = torch.exp(0.5 * self.logvar )
_snake_case = torch.exp(self.logvar )
if self.deterministic:
_snake_case = _snake_case = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowerCamelCase ( self , lowerCAmelCase_ = None ):
"""simple docstring"""
_snake_case = randn_tensor(
self.mean.shape , generator=lowerCAmelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
_snake_case = self.mean + self.std * sample
return x
def lowerCamelCase ( self , lowerCAmelCase_=None ):
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=[1, 2, 3] ):
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
_snake_case = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
return self.mean
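# The quantizer above maps each latent vector to its nearest codebook entry with
# torch.cdist + argmin, then applies a straight-through estimator so gradients reach
# the encoder. A tiny standalone sketch of both steps (sizes are illustrative):
import torch
codebook = torch.randn(16, 4)  # 16 codes of dimension 4
z = torch.randn(5, 4, requires_grad=True)
indices = torch.argmin(torch.cdist(z, codebook), dim=1)
z_q = codebook[indices]
z_q = z + (z_q - z).detach()  # forward pass uses z_q, backward pass sees z
z_q.sum().backward()
assert z.grad is not None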
| 42
| 0
|
"""simple docstring"""
from __future__ import annotations
RADIX = 10
def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
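# An illustrative run of the sorter above:
assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]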
| 347
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( __A ) -> bool:
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
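# The check above works because the least significant bit of any even integer is 0,
# so `n & 1` isolates parity; a few illustrative cases:
assert (10 & 1) == 0 and (7 & 1) == 1 and (-4 & 1) == 0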
| 42
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class snake_case_ ( _lowerCamelCase ):
__A : Union[str, Any] = 42
__A : Union[str, Any] = 42
__A : Tuple = None
class snake_case_ ( _lowerCamelCase ,_lowerCamelCase ):
__A : str = 2
@register_to_config
def __init__( self : int , lowercase_ : List[Any] = 0.02 , lowercase_ : str = 1_00 , lowercase_ : Tuple = 1.0_07 , lowercase_ : Optional[int] = 80 , lowercase_ : Dict = 0.05 , lowercase_ : Any = 50 , ) -> List[str]:
lowercase__ : List[str] = sigma_max
# setable values
lowercase__ : str = None
lowercase__ : Union[str, Any] = None
lowercase__ : Dict = None # sigma(t_i)
def __UpperCamelCase ( self : List[str] , lowercase_ : List[str] , lowercase_ : Union[str, Any] = None ) -> Optional[Any]:
return sample
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Any = None ) -> Union[str, Any]:
lowercase__ : Any = num_inference_steps
lowercase__ : Optional[int] = np.arange(0 , self.num_inference_steps )[::-1].copy()
lowercase__ : Tuple = torch.from_numpy(lowerCAmelCase_ ).to(lowerCAmelCase_ )
lowercase__ : Optional[int] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
lowercase__ : List[Any] = torch.tensor(lowerCAmelCase_ , dtype=torch.floataa , device=lowerCAmelCase_ )
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : str , lowercase_ : int , lowercase_ : List[Any] = None ) -> Optional[int]:
if self.config.s_min <= sigma <= self.config.s_max:
lowercase__ : int = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
lowercase__ : Tuple = 0
# sample eps ~ N(0, S_noise^2 * I)
lowercase__ : Tuple = self.config.s_noise * randn_tensor(sample.shape , generator=lowerCAmelCase_ ).to(sample.device )
lowercase__ : str = sigma + gamma * sigma
lowercase__ : Optional[Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __UpperCamelCase ( self : int , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Tuple = True , ) -> Union[str, Any]:
lowercase__ : Any = sample_hat + sigma_hat * model_output
lowercase__ : List[Any] = (sample_hat - pred_original_sample) / sigma_hat
lowercase__ : int = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , pred_original_sample=lowerCAmelCase_ )
def __UpperCamelCase ( self : Dict , lowercase_ : int , lowercase_ : int , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : Any = True , ) -> Tuple:
lowercase__ : Dict = sample_prev + sigma_prev * model_output
lowercase__ : Tuple = (sample_prev - pred_original_sample) / sigma_prev
lowercase__ : Any = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , pred_original_sample=lowerCAmelCase_ )
def __UpperCamelCase ( self : Dict , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : str ) -> Optional[Any]:
raise NotImplementedError()
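# The noise-adding step above (add_noise_to_input in upstream diffusers) implements the
# "churn" of Karras et al. (2022): sigma_hat = sigma * (1 + gamma), and the injected
# noise has standard deviation sqrt(sigma_hat**2 - sigma**2), so the total noise level
# rises exactly to sigma_hat. A scalar sanity check of that identity:
sigma, gamma = 2.0, 0.1
sigma_hat = sigma + gamma * sigma
extra_std = (sigma_hat**2 - sigma**2) ** 0.5
assert abs((sigma**2 + extra_std**2) ** 0.5 - sigma_hat) < 1e-9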
| 87
|
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
lowercase : Optional[Any] = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
lowercase : Tuple = parser.parse_args()
lowercase : Optional[int] = "cpu"
lowercase : Optional[Any] = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
lowercase : Optional[int] = "path-to-your-trained-model"
lowercase : List[str] = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
lowercase : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
lowercase : Dict = pipe.to(device)
# to channels last
lowercase : Optional[Any] = pipe.unet.to(memory_format=torch.channels_last)
lowercase : int = pipe.vae.to(memory_format=torch.channels_last)
lowercase : Optional[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
lowercase : Optional[int] = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
lowercase : Any = torch.randn(2, 4, 64, 64)
lowercase : Optional[int] = torch.rand(1) * 999
lowercase : Optional[Any] = torch.randn(2, 77, 768)
lowercase : Optional[Any] = (sample, timestep, encoder_hidden_status)
try:
lowercase : List[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
lowercase : List[str] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
lowercase : Tuple = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
lowercase : Optional[Any] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
lowercase : Tuple = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
lowercase : List[str] = 666
lowercase : Tuple = torch.Generator(device).manual_seed(seed)
lowercase : Union[str, Any] = {"generator": generator}
if args.steps is not None:
lowercase : Dict = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
lowercase : List[str] = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 42
| 0
|
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
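# An illustrative call: the function returns one longest non-decreasing subsequence,
# here of length 6 ([10, 22, 33, 41, 60, 80]):
assert len(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])) == 6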
| 317
|
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __UpperCAmelCase ( _lowerCamelCase ):
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
return 0.0
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> tuple[int | float, int | float]:
_snake_case = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
_snake_case = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> None:
_snake_case = 512
_snake_case = [1] + [0] * (size - 1)
_snake_case = [filter_type.process(__A ) for item in inputs]
_snake_case = [0] * (samplerate - size) # zero-padding
outputs += filler
_snake_case = np.abs(np.fft.fft(__A ) )
_snake_case = 20 * np.logaa(__A )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('Frequency (Hz)' )
plt.xscale('log' )
# Display within reasonable bounds
_snake_case = get_bounds(__A , __A )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel('Gain (dB)' )
plt.plot(__A )
plt.show()
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> None:
_snake_case = 512
_snake_case = [1] + [0] * (size - 1)
_snake_case = [filter_type.process(__A ) for item in inputs]
_snake_case = [0] * (samplerate - size) # zero-padding
outputs += filler
_snake_case = np.angle(np.fft.fft(__A ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('Frequency (Hz)' )
plt.xscale('log' )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel('Phase shift (Radians)' )
plt.plot(np.unwrap(__A , -2 * pi ) )
plt.show()
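# The plotting helpers above characterize a filter by feeding it a unit impulse and
# taking the FFT of the response; the magnitude in dB is 20 * log10(|FFT|). A compact
# numpy-only illustration for an identity "filter", whose response is flat at 0 dB:
import numpy as np
impulse = [1.0] + [0.0] * 511
spectrum_db = 20 * np.log10(np.abs(np.fft.fft(impulse)))
assert np.allclose(spectrum_db, 0.0)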
| 42
| 0
|
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _A ( _lowerCamelCase ):
_UpperCamelCase : Union[str, Any] = ['''image_processor''', '''tokenizer''']
_UpperCamelCase : List[Any] = '''BlipImageProcessor'''
_UpperCamelCase : Union[str, Any] = '''AutoTokenizer'''
def __init__( self : Dict , _A : int , _A : Optional[int] , _A : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
# add QFormer tokenizer
lowercase : List[str] = qformer_tokenizer
def __call__( self : List[str] , _A : Union[str, Any] = None , _A : Optional[int] = None , _A : Union[str, Any] = True , _A : List[Any] = False , _A : Tuple = None , _A : int = None , _A : Optional[Any] = 0 , _A : Optional[Any] = None , _A : Optional[int] = None , _A : Any = False , _A : int = False , _A : Optional[Any] = False , _A : int = False , _A : List[Any] = False , _A : Any = True , _A : Tuple = None , **_A : Any , ) -> List[Any]:
"""simple docstring"""
if images is None and text is None:
raise ValueError('''You have to specify at least images or text.''' )
lowercase : List[Any] = BatchFeature()
if text is not None:
lowercase : Dict = self.tokenizer(
text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
encoding.update(lowerCAmelCase_ )
lowercase : int = self.qformer_tokenizer(
text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
lowercase : Any = qformer_text_encoding.pop('''input_ids''' )
lowercase : int = qformer_text_encoding.pop('''attention_mask''' )
if images is not None:
lowercase : List[str] = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
encoding.update(lowerCAmelCase_ )
return encoding
def __a ( self : Dict , *_A : List[Any] , **_A : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __a ( self : Tuple , *_A : List[str] , **_A : int ) -> int:
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase : List[str] = self.tokenizer.model_input_names
lowercase : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def __a ( self : Any , _A : List[str] , **_A : Tuple ) -> Any:
"""simple docstring"""
if os.path.isfile(lowerCAmelCase_ ):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
lowercase : Any = os.path.join(lowerCAmelCase_ , '''qformer_tokenizer''' )
self.qformer_tokenizer.save_pretrained(lowerCAmelCase_ )
return super().save_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
@classmethod
def __a ( cls : Any , _A : Dict , **_A : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase_ , subfolder='''qformer_tokenizer''' )
lowercase : List[Any] = cls._get_arguments_from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
args.append(lowerCAmelCase_ )
return cls(*lowerCAmelCase_ )
| 308
|
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class __UpperCAmelCase ( tf.keras.layers.Layer ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1 , lowerCAmelCase_=False , **lowerCAmelCase_ ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_snake_case = vocab_size
_snake_case = d_embed
_snake_case = d_proj
_snake_case = cutoffs + [vocab_size]
_snake_case = [0] + self.cutoffs
_snake_case = div_val
_snake_case = self.cutoffs[0]
_snake_case = len(self.cutoffs ) - 1
_snake_case = self.shortlist_size + self.n_clusters
_snake_case = keep_order
_snake_case = []
_snake_case = []
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
if self.n_clusters > 0:
_snake_case = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='zeros' , trainable=lowerCAmelCase_ , name='cluster_weight' )
_snake_case = self.add_weight(
shape=(self.n_clusters,) , initializer='zeros' , trainable=lowerCAmelCase_ , name='cluster_bias' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
_snake_case = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_projs_._{i}' , )
self.out_projs.append(lowerCAmelCase_ )
else:
self.out_projs.append(lowerCAmelCase_ )
_snake_case = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_layers_._{i}_._weight' , )
_snake_case = self.add_weight(
shape=(self.vocab_size,) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_layers_._{i}_._bias' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
_snake_case , _snake_case = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_snake_case = self.d_embed // (self.div_val**i)
_snake_case = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_projs_._{i}' )
self.out_projs.append(lowerCAmelCase_ )
_snake_case = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_layers_._{i}_._weight' , )
_snake_case = self.add_weight(
shape=(r_idx - l_idx,) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'out_layers_._{i}_._bias' , )
self.out_layers.append((weight, bias) )
super().build(lowerCAmelCase_ )
@staticmethod
def lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ):
"""simple docstring"""
_snake_case = x
if proj is not None:
_snake_case = tf.einsum('ibd,ed->ibe' , lowerCAmelCase_ , lowerCAmelCase_ )
return tf.einsum('ibd,nd->ibn' , lowerCAmelCase_ , lowerCAmelCase_ ) + b
@staticmethod
def lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = shape_list(lowerCAmelCase_ )
_snake_case = tf.range(lp_size[0] , dtype=target.dtype )
_snake_case = tf.stack([r, target] , 1 )
return tf.gather_nd(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=False ):
"""simple docstring"""
_snake_case = 0
if self.n_clusters == 0:
_snake_case = self._logit(lowerCAmelCase_ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
_snake_case = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=lowerCAmelCase_ , logits=lowerCAmelCase_ )
_snake_case = tf.nn.log_softmax(lowerCAmelCase_ , axis=-1 )
else:
_snake_case = shape_list(lowerCAmelCase_ )
_snake_case = []
_snake_case = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
_snake_case , _snake_case = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
_snake_case = (target >= l_idx) & (target < r_idx)
_snake_case = tf.where(lowerCAmelCase_ )
_snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ ) - l_idx
if self.div_val == 1:
_snake_case = self.out_layers[0][0][l_idx:r_idx]
_snake_case = self.out_layers[0][1][l_idx:r_idx]
else:
_snake_case = self.out_layers[i][0]
_snake_case = self.out_layers[i][1]
if i == 0:
_snake_case = tf.concat([cur_W, self.cluster_weight] , 0 )
_snake_case = tf.concat([cur_b, self.cluster_bias] , 0 )
_snake_case = self._logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.out_projs[0] )
_snake_case = tf.nn.log_softmax(lowerCAmelCase_ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
_snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self._gather_logprob(lowerCAmelCase_ , lowerCAmelCase_ )
else:
_snake_case = self._logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.out_projs[i] )
_snake_case = tf.nn.log_softmax(lowerCAmelCase_ )
_snake_case = self.cutoffs[0] + i - 1 # No probability for the head cluster
_snake_case = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(lowerCAmelCase_ )
if target is not None:
_snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self._gather_logprob(lowerCAmelCase_ , lowerCAmelCase_ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(lowerCAmelCase_ , -cur_logprob , shape_list(lowerCAmelCase_ ) )
_snake_case = tf.concat(lowerCAmelCase_ , axis=-1 )
if target is not None:
if return_mean:
_snake_case = tf.reduce_mean(lowerCAmelCase_ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(lowerCAmelCase_ )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(lowerCAmelCase_ , name=self.name , aggregation='mean' if return_mean else '' )
return out
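# The adaptive softmax above composes log-probabilities hierarchically:
# log p(word) = log p(cluster) + log p(word | cluster). A scalar check of that identity:
import math
p_cluster, p_word_given_cluster = 0.25, 0.1
assert math.isclose(
    math.log(p_cluster * p_word_given_cluster),
    math.log(p_cluster) + math.log(p_word_given_cluster),
)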
| 42
| 0
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
lowerCAmelCase : Dict = logging.get_logger(__name__)
lowerCAmelCase : List[Any] = {"vocab_file": "spiece.model"}
lowerCAmelCase : Union[str, Any] = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
}
}
lowerCAmelCase : List[Any] = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
# Segments (not really needed)
lowerCAmelCase : str = 0
lowerCAmelCase : str = 1
lowerCAmelCase : List[str] = 2
lowerCAmelCase : Optional[Any] = 3
lowerCAmelCase : List[Any] = 4
class __magic_name__ ( _lowerCamelCase ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = "left"
def __init__( self , _a , _a=False , _a=True , _a=False , _a="<s>" , _a="</s>" , _a="<unk>" , _a="<sep>" , _a="<pad>" , _a="<cls>" , _a="<mask>" , _a=["<eop>", "<eod>"] , _a = None , **_a , ):
"""simple docstring"""
lowerCamelCase = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token
lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCAmelCase_ , remove_space=lowerCAmelCase_ , keep_accents=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , )
lowerCamelCase = 3
lowerCamelCase = do_lower_case
lowerCamelCase = remove_space
lowerCamelCase = keep_accents
lowerCamelCase = vocab_file
lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase_ )
@property
def _lowerCAmelCase ( self ):
"""simple docstring"""
return len(self.sp_model )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
lowerCamelCase = self.__dict__.copy()
lowerCamelCase = None
return state
def __setstate__( self , _a ):
"""simple docstring"""
lowerCamelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCamelCase = {}
lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
if self.remove_space:
lowerCamelCase = """ """.join(inputs.strip().split() )
else:
lowerCamelCase = inputs
lowerCamelCase = outputs.replace("""``""" , """\"""" ).replace("""\'\'""" , """\"""" )
if not self.keep_accents:
lowerCamelCase = unicodedata.normalize("""NFKD""" , lowerCAmelCase_ )
lowerCamelCase = """""".join([c for c in outputs if not unicodedata.combining(lowerCAmelCase_ )] )
if self.do_lower_case:
lowerCamelCase = outputs.lower()
return outputs
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
lowerCamelCase = self.preprocess_text(lowerCAmelCase_ )
lowerCamelCase = self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_ )
lowerCamelCase = []
for piece in pieces:
if len(lowerCAmelCase_ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
lowerCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCAmelCase_ , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase = cur_pieces[1:]
else:
lowerCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowerCAmelCase_ )
else:
new_pieces.append(lowerCAmelCase_ )
return new_pieces
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
return self.sp_model.PieceToId(lowerCAmelCase_ )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
return self.sp_model.IdToPiece(lowerCAmelCase_ )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
lowerCamelCase = """""".join(lowerCAmelCase_ ).replace(lowerCAmelCase_ , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self , _a , _a = False , _a = None , _a = True , **_a , ):
"""simple docstring"""
lowerCamelCase = kwargs.pop("""use_source_tokenizer""" , lowerCAmelCase_ )
lowerCamelCase = self.convert_ids_to_tokens(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCamelCase = []
lowerCamelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase_ ) )
lowerCamelCase = []
sub_texts.append(lowerCAmelCase_ )
else:
current_sub_text.append(lowerCAmelCase_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase_ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
lowerCamelCase = """""".join(lowerCAmelCase_ )
lowerCamelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCamelCase = self.clean_up_tokenization(lowerCAmelCase_ )
return clean_text
else:
return text
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
lowerCamelCase = [self.sep_token_id]
lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCAmelCase ( self , _a , _a = None , _a = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is not None:
return ([0] * len(lowerCAmelCase_ )) + [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1]
return ([0] * len(lowerCAmelCase_ )) + [1, 1]
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
lowerCamelCase = [self.sep_token_id]
lowerCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase_ , """wb""" ) as fi:
lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase_ )
return (out_vocab_file,)
| 291
|
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowercase : Dict = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowercase : Optional[int] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowercase : Optional[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> tuple[str, float]:
_snake_case = len([g for position, g in enumerate(__A ) if g == main_target[position]] )
return (item, float(__A ))
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> tuple[str, str]:
_snake_case = random.randint(0 , len(__A ) - 1 )
_snake_case = parent_a[:random_slice] + parent_a[random_slice:]
_snake_case = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> str:
_snake_case = list(__A )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
_snake_case = random.choice(__A )
return "".join(__A )
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A , ) -> list[str]:
_snake_case = []
# Generate more children proportionally to the fitness score.
_snake_case = int(parent_a[1] * 100 ) + 1
_snake_case = 10 if child_n >= 10 else child_n
for _ in range(__A ):
_snake_case = population_score[random.randint(0 , __A )][0]
_snake_case , _snake_case = crossover(parent_a[0] , __A )
# Append new string to the population list.
pop.append(mutate(__A , __A ) )
pop.append(mutate(__A , __A ) )
return pop
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A = True ) -> tuple[int, int, str]:
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
_snake_case = F'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(__A )
# Verify that the target contains no genes besides the ones inside genes variable.
_snake_case = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_snake_case = F'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(__A )
# Generate random starting population.
_snake_case = []
for _ in range(__A ):
population.append(''.join([random.choice(__A ) for i in range(len(__A ) )] ) )
# Just some logs to know what the algorithms is doing.
_snake_case , _snake_case = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(__A )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_snake_case = [evaluate(__A , __A ) for item in population]
# Check if there is a matching evolution.
_snake_case = sorted(__A , key=lambda x : x[1] , reverse=__A )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'\nGeneration: {generation}'
F'\nTotal Population:{total_population}'
F'\nBest score: {population_score[0][1]}'
F'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
_snake_case = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(__A )
# Normalize population score to be between 0 and 1.
_snake_case = [
(item, score / len(__A )) for item, score in population_score
]
# This is selection
for i in range(__A ):
population.extend(select(population_score[int(i )] , __A , __A ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(__A ) > N_POPULATION:
break
if __name__ == "__main__":
lowercase : str = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowercase : str = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowercase , lowercase , lowercase : Tuple = basic(target_str, genes_list)
print(
F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
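# A deterministic illustration of the single-point crossover used in the module above:
# the tails of two parents are swapped at one cut index (here fixed instead of random):
parent_a, parent_b, cut = "AAAA", "BBBB", 2
child_a = parent_a[:cut] + parent_b[cut:]
child_b = parent_b[:cut] + parent_a[cut:]
assert (child_a, child_b) == ("AABB", "BBAA")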
| 42
| 0
|
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
a : Union[str, Any] = logging.getLogger(__name__)
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser(
description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
)
parser.add_argument(
"--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
)
parser.add_argument(
"--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
)
parser.add_argument("--vocab_size", default=3_05_22, type=int)
a : str = parser.parse_args()
logger.info(F'Loading data from {args.data_file}')
with open(args.data_file, "rb") as fp:
a : Any = pickle.load(fp)
logger.info("Counting occurrences for MLM.")
a : Dict = Counter()
for tk_ids in data:
counter.update(tk_ids)
a : Optional[Any] = [0] * args.vocab_size
for k, v in counter.items():
a : Optional[int] = v
logger.info(F'Dump to {args.token_counts_dump}')
with open(args.token_counts_dump, "wb") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 311
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase : Any = {
"configuration_chinese_clip": [
"CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ChineseCLIPConfig",
"ChineseCLIPOnnxConfig",
"ChineseCLIPTextConfig",
"ChineseCLIPVisionConfig",
],
"processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[Any] = ["ChineseCLIPFeatureExtractor"]
lowercase : List[Any] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ChineseCLIPModel",
"ChineseCLIPPreTrainedModel",
"ChineseCLIPTextModel",
"ChineseCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
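# Note: with the _LazyModule wiring above, importing this package is cheap; the
# torch- and vision-dependent classes are only materialized on first attribute
# access, e.g. (sketch, assuming the usual transformers package layout):
#   from transformers.models.chinese_clip import ChineseCLIPModel  # triggers the real import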
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=32_000,
        d_model=1_024,
        n_layer=24,
        n_head=16,
        d_inner=4_096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        """simple docstring"""
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        """simple docstring"""
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
'''simple docstring'''
def count_divisors(n: int) -> int:
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
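# Worked example: 28 = 2^2 * 7, so the divisor count is (2 + 1) * (1 + 1) = 6,
# matching its divisors 1, 2, 4, 7, 14, 28; count_divisors(28) returns 6.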
def solution() -> int:
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50_003
PYTHON_CODE = 50_002
@require_sentencepiece
@require_tokenizers
class lowercase_ ( _lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = PLBartTokenizer
UpperCAmelCase : str = None
UpperCAmelCase : Optional[int] = False
    def setUp(self):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase_ ( self : Any ):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
_A = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowerCAmelCase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_A = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_A = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_A = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])
_A = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
_A = tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , )
def lowerCAmelCase_ ( self : Tuple ):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)
_A = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowerCAmelCase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_A = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_A = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_A = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]
        )
_A = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
_A = tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
    tgt_text = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
    expected_src_tokens = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
    def setUpClass(cls):
        cls.tokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls
def lowerCAmelCase_ ( self : Any ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__java__'] , 50_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__python__'] , 50_002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__en_XX__'] , 50_003 )
def lowerCAmelCase_ ( self : int ):
_A = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ )
def lowerCAmelCase_ ( self : str ):
self.assertIn(lowerCAmelCase_ , self.tokenizer.all_special_ids )
_A = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2]
_A = self.tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
_A = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase_ )
def lowerCAmelCase_ ( self : int ):
_A = ['def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])' * 20]
self.assertIsInstance(src_text[0] , lowerCAmelCase_ )
_A = 10
_A = self.tokenizer(lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowerCAmelCase_ ( self : int ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', '__java__'] ) , [50_004, 50_001] )
def lowerCAmelCase_ ( self : str ):
_A = tempfile.mkdtemp()
_A = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase_ )
_A = PLBartTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase_ )
@require_torch
def lowerCAmelCase_ ( self : List[str] ):
_A = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , return_tensors='pt' )
_A = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase_ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowerCAmelCase_ ( self : List[str] ):
_A = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
_A = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
_A = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowerCAmelCase_ ( self : Any ):
_A = self.tokenizer(self.src_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=3 , return_tensors='pt' )
_A = self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=10 , return_tensors='pt' )
_A = targets['input_ids']
_A = shift_tokens_right(lowerCAmelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowerCAmelCase_ ( self : Any ):
_A = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='java' )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , {
# A, test, EOS, en_XX
'input_ids': [[150, 242, 2, 50_003]],
'attention_mask': [[1, 1, 1, 1]],
# java
'forced_bos_token_id': 50_001,
} , )
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
@require_torch
    def test_offline_mode(self):
"""simple docstring"""
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)
# baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
    def test_offline_mode_no_internet(self):
"""simple docstring"""
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)
# baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
    def test_offline_mode_sharded_checkpoint(self):
"""simple docstring"""
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
    def test_offline_mode_pipeline_exception(self):
"""simple docstring"""
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )
@require_torch
    def test_offline_mode_custom_model(self):
"""simple docstring"""
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
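# Note: TRANSFORMERS_OFFLINE is a real environment variable understood by the
# transformers library; setting it to "1" makes from_pretrained() resolve files
# from the local cache only, which is why the mocked-socket subprocesses above
# can still succeed once the files have been fetched.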
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"yjernite/retribert-base-uncased": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        '''simple docstring'''
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
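    # For a pair of sequences the layout is "[CLS] A [SEP] B [SEP]": with
    # len(token_ids_a) == 2 and len(token_ids_b) == 3 the method above returns
    # [0, 0, 0, 0] + [1, 1, 1, 1] (CLS, A, SEP marked 0; B, SEP marked 1).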
    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
@slow
    def test_small_integration_test(self):
        """simple docstring"""
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids
        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
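        # `loss` is the mean per-token cross-entropy; multiplying by the number of
        # target tokens and negating (below) recovers the total sequence
        # log-likelihood that the reference (Mesh TensorFlow) score is expressed in.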
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
def __init__( self : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any=13 , UpperCamelCase__ : Optional[Any]=7 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Optional[Any]=99 , UpperCamelCase__ : Dict=32 , UpperCamelCase__ : str=5 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : List[str]=37 , UpperCamelCase__ : Optional[int]="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : List[str]=512 , UpperCamelCase__ : str=16 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Dict=4 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : List[str] = batch_size
SCREAMING_SNAKE_CASE : Union[str, Any] = seq_length
SCREAMING_SNAKE_CASE : int = is_training
SCREAMING_SNAKE_CASE : Tuple = use_attention_mask
SCREAMING_SNAKE_CASE : str = use_token_type_ids
SCREAMING_SNAKE_CASE : str = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : str = intermediate_size
SCREAMING_SNAKE_CASE : int = hidden_act
SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Any = type_vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : List[Any] = num_choices
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : List[str] = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = config_and_inputs
SCREAMING_SNAKE_CASE : Any = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = config_and_inputs
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        '''simple docstring'''
        self.model_tester = FlaxBertModelTester(self)
@slow
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = FlaxBertModel.from_pretrained('''bert-base-cased''' )
SCREAMING_SNAKE_CASE : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCAmelCase_ )
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[int] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel

logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    alpha = 2
    if unlogit:
        p = torch.pow(p, alpha)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
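# The function above computes the Shannon entropy H(p) = -sum_i p_i * log(p_i)
# along the last axis; entries with p == 0 are zeroed out first, matching the
# limit p * log(p) -> 0 as p -> 0.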
def print_ad_tensor(tensor):
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
if tensor.dtype != torch.long:
logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:.5f}' for x in tensor[row].cpu().data ) )
else:
logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:d}' for x in tensor[row].cpu().data ) )
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
_UpperCamelCase , _UpperCamelCase : List[str] = model.config.num_hidden_layers, model.config.num_attention_heads
_UpperCamelCase : Any = torch.zeros(__A , __A ).to(args.device )
_UpperCamelCase : Dict = torch.zeros(__A , __A ).to(args.device )
if head_mask is None:
_UpperCamelCase : Union[str, Any] = torch.ones(__A , __A ).to(args.device )
head_mask.requires_grad_(requires_grad=__A )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
_UpperCamelCase : Dict = None
_UpperCamelCase : List[Any] = 0.0
_UpperCamelCase : Tuple = 0.0
for step, inputs in enumerate(tqdm(__A , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
_UpperCamelCase : Tuple = tuple(t.to(args.device ) for t in inputs )
((_UpperCamelCase ) , ) : str = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_UpperCamelCase : Optional[Any] = model(__A , labels=__A , head_mask=__A )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Tuple = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(__A ):
_UpperCamelCase : Union[str, Any] = entropy(attn.detach() , __A )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(__A ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_UpperCamelCase : Any = 2
_UpperCamelCase : Optional[int] = torch.pow(torch.pow(__A , __A ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
_UpperCamelCase : Union[str, Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(__A )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(__A )
logger.info('Head ranked by importance scores' )
_UpperCamelCase : int = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
_UpperCamelCase : Optional[Any] = torch.arange(
head_importance.numel() , device=args.device )
_UpperCamelCase : Any = head_ranks.view_as(__A )
print_ad_tensor(__A )
return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Tuple = compute_heads_importance(__A , __A , __A , compute_entropy=__A )
_UpperCamelCase : Any = 1 / loss # instead of downsteam score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , __A , original_score * args.masking_threshold )
_UpperCamelCase : Optional[int] = torch.ones_like(__A )
_UpperCamelCase : Optional[Any] = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_UpperCamelCase : Any = original_score
while current_score >= original_score * args.masking_threshold:
_UpperCamelCase : List[str] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_UpperCamelCase : Tuple = float('Inf' )
_UpperCamelCase : Optional[int] = head_importance.view(-1 ).sort()[1]
if len(__A ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
_UpperCamelCase : Dict = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
_UpperCamelCase : Tuple = new_head_mask.view(-1 )
_UpperCamelCase : Optional[int] = 0.0
_UpperCamelCase : str = new_head_mask.view_as(__A )
_UpperCamelCase : Tuple = new_head_mask.clone().detach()
print_ad_tensor(__A )
# Compute metric and head importance again
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Union[str, Any] = compute_heads_importance(
__A , __A , __A , compute_entropy=__A , head_mask=__A )
_UpperCamelCase : Any = 1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percents)' , __A , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , )
logger.info('Final head mask' )
print_ad_tensor(__A )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
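# mask_heads greedily zeroes out heads in batches, least important first, and
# keeps masking while the score stays at or above
# masking_threshold * original_score; the last mask that satisfied the threshold
# is saved to head_mask.npy and returned.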
def prune_heads(args, model, eval_dataloader, head_mask):
_UpperCamelCase : Optional[int] = datetime.now()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Tuple = compute_heads_importance(
__A , __A , __A , compute_entropy=__A , compute_importance=__A , head_mask=__A )
_UpperCamelCase : str = 1 / loss
_UpperCamelCase : str = datetime.now() - before_time
_UpperCamelCase : str = sum(p.numel() for p in model.parameters() )
_UpperCamelCase : Dict = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__A ) )
}
for k, v in heads_to_prune.items():
if isinstance(__A , __A ):
_UpperCamelCase : int = [
v,
]
assert sum(len(__A ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(__A )
_UpperCamelCase : Union[str, Any] = sum(p.numel() for p in model.parameters() )
_UpperCamelCase : List[str] = datetime.now()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : List[str] = compute_heads_importance(
__A , __A , __A , compute_entropy=__A , compute_importance=__A , head_mask=__A , actually_pruned=__A , )
_UpperCamelCase : List[Any] = 1 / loss
_UpperCamelCase : int = datetime.now() - before_time
logger.info(
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , __A , __A , pruned_num_params / original_num_params * 1_0_0 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , __A , __A )
logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_0_0 )
save_model(__A , args.output_dir )
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
parser.add_argument(
'--data_dir' , default=__A , type=__A , required=__A , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=__A , type=__A , required=__A , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=__A , type=__A , required=__A , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=__A , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=__A , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=__A , type=__A , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=__A , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=__A , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=__A , help='Amount to heads to masking at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=__A , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=1_2_8 , type=__A , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=__A , help='Batch size.' )
parser.add_argument('--seed' , type=__A , default=4_2 )
parser.add_argument('--local_rank' , type=__A , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=__A , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=__A , default='' , help='Can be used for distant debugging.' )
    args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
# Distributed and parallel training
model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
# Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)
# Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),  # dtype assumed: token ids are typically int64
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(key: tuple[int, ...], ciphertext: list[int]) -> str | None:
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
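# Quick sanity check of the XOR round trip (key and text purely illustrative):
# since (c ^ k) ^ k == c for any byte, decoding with the key that was used for
# encoding recovers the plaintext, while wrong keys are usually rejected early
# by the VALID_INTS check above.
#   assert try_key((1, 1, 1), [ord(c) ^ 1 for c in "the"]) == "the"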
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(key, ciphertext)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def SCREAMING_SNAKE_CASE__ ( __A = "p059_cipher.txt" ) -> int:
_snake_case = 42
_snake_case = 42
_snake_case = 42
_snake_case = 42
_snake_case = Path(__A ).parent.joinpath(__A ).read_text(encoding='utf-8' )
_snake_case = [int(__A ) for number in data.strip().split(',' )]
_snake_case = filter_valid_chars(__A )
for common_word in COMMON_WORDS:
_snake_case = filter_common_word(__A , __A )
if len(__A ) == 1:
break
_snake_case = possibles[0]
return sum(ord(__A ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')