code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_lowerCAmelCase: Any = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class lowercase_ (lowercase__ ):
def __init__( self , *lowercase_ , **lowercase_) -> Union[str, Any]:
super().__init__(*lowercase_ , **lowercase_)
self.check_model_type(lowercase_)
def __UpperCamelCase ( self , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_) -> str:
a__ , a__ ={}, {}
if padding is not None:
a__ =padding
if truncation is not None:
a__ =truncation
if top_k is not None:
a__ =top_k
return preprocess_params, {}, postprocess_params
def __call__( self , lowercase_ , lowercase_ = None , **lowercase_) -> List[Any]:
if isinstance(lowercase_ , (Image.Image, str)) and isinstance(lowercase_ , lowercase_):
a__ ={'image': image, 'question': question}
else:
a__ =image
a__ =super().__call__(lowercase_ , **lowercase_)
return results
def __UpperCamelCase ( self , lowercase_ , lowercase_=False , lowercase_=False) -> Optional[int]:
a__ =load_image(inputs['image'])
a__ =self.tokenizer(
inputs['question'] , return_tensors=self.framework , padding=lowercase_ , truncation=lowercase_)
a__ =self.image_processor(images=lowercase_ , return_tensors=self.framework)
model_inputs.update(lowercase_)
return model_inputs
def __UpperCamelCase ( self , lowercase_) -> Optional[Any]:
a__ =self.model(**lowercase_)
return model_outputs
def __UpperCamelCase ( self , lowercase_ , lowercase_=5) -> Union[str, Any]:
if top_k > self.model.config.num_labels:
a__ =self.model.config.num_labels
if self.framework == "pt":
a__ =model_outputs.logits.sigmoid()[0]
a__ , a__ =probs.topk(lowercase_)
else:
raise ValueError(F"""Unsupported framework: {self.framework}""")
a__ =scores.tolist()
a__ =ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_)]
| 20 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : Dict = '''wavlm'''
def __init__( self : Tuple , UpperCamelCase__ : str=3_2 , UpperCamelCase__ : Any=7_6_8 , UpperCamelCase__ : Any=1_2 , UpperCamelCase__ : Tuple=1_2 , UpperCamelCase__ : str=3_0_7_2 , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : Optional[int]=1E-5 , UpperCamelCase__ : Any="group" , UpperCamelCase__ : List[str]="gelu" , UpperCamelCase__ : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCamelCase__ : List[str]=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase__ : Dict=(1_0, 3, 3, 3, 3, 2, 2) , UpperCamelCase__ : int=False , UpperCamelCase__ : Optional[Any]=1_2_8 , UpperCamelCase__ : Optional[int]=1_6 , UpperCamelCase__ : Optional[Any]=3_2_0 , UpperCamelCase__ : Any=8_0_0 , UpperCamelCase__ : Any=False , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Optional[Any]=0.05 , UpperCamelCase__ : Optional[Any]=1_0 , UpperCamelCase__ : Union[str, Any]=2 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Tuple=1_0 , UpperCamelCase__ : Optional[int]=3_2_0 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Tuple=1_0_0 , UpperCamelCase__ : Dict=2_5_6 , UpperCamelCase__ : Optional[int]=2_5_6 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Tuple="mean" , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Union[str, Any]=2_5_6 , UpperCamelCase__ : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCamelCase__ : Optional[Any]=(5, 3, 3, 1, 1) , UpperCamelCase__ : Any=(1, 2, 3, 1, 1) , UpperCamelCase__ : Dict=5_1_2 , UpperCamelCase__ : str=8_0 , UpperCamelCase__ : Optional[int]=0 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : str=False , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : 
Optional[Any]=2 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__)
snake_case__ = hidden_size
snake_case__ = feat_extract_norm
snake_case__ = feat_extract_activation
snake_case__ = list(UpperCamelCase__)
snake_case__ = list(UpperCamelCase__)
snake_case__ = list(UpperCamelCase__)
snake_case__ = conv_bias
snake_case__ = num_buckets
snake_case__ = max_bucket_distance
snake_case__ = num_conv_pos_embeddings
snake_case__ = num_conv_pos_embedding_groups
snake_case__ = len(self.conv_dim)
snake_case__ = num_hidden_layers
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = num_attention_heads
snake_case__ = hidden_dropout
snake_case__ = attention_dropout
snake_case__ = activation_dropout
snake_case__ = feat_proj_dropout
snake_case__ = final_dropout
snake_case__ = layerdrop
snake_case__ = layer_norm_eps
snake_case__ = initializer_range
snake_case__ = num_ctc_classes
snake_case__ = vocab_size
snake_case__ = do_stable_layer_norm
snake_case__ = use_weighted_layer_sum
snake_case__ = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case__ = apply_spec_augment
snake_case__ = mask_time_prob
snake_case__ = mask_time_length
snake_case__ = mask_time_min_masks
snake_case__ = mask_feature_prob
snake_case__ = mask_feature_length
# parameters for pretraining with codevector quantized representations
snake_case__ = num_codevectors_per_group
snake_case__ = num_codevector_groups
snake_case__ = contrastive_logits_temperature
snake_case__ = num_negatives
snake_case__ = codevector_dim
snake_case__ = proj_codevector_dim
snake_case__ = diversity_loss_weight
# ctc loss
snake_case__ = ctc_loss_reduction
snake_case__ = ctc_zero_infinity
# adapter
snake_case__ = add_adapter
snake_case__ = adapter_kernel_size
snake_case__ = adapter_stride
snake_case__ = num_adapter_layers
snake_case__ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
snake_case__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
snake_case__ = list(UpperCamelCase__)
snake_case__ = list(UpperCamelCase__)
snake_case__ = list(UpperCamelCase__)
snake_case__ = xvector_output_dim
@property
def __magic_name__ ( self : Optional[int]):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1)
| 654 | 0 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class __A ( UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = RoFormerTokenizer
UpperCamelCase = RoFormerTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def A__ ( self :List[str] ):
'''simple docstring'''
super().setUp()
def A__ ( self :List[str] , **__snake_case :str ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **__snake_case )
def A__ ( self :str , **__snake_case :Dict ):
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **__snake_case )
def A__ ( self :Optional[int] ):
'''simple docstring'''
__magic_name__ : List[str] ="""永和服装饰品有限公司,今天天气非常好"""
__magic_name__ : Any ="""永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
return input_text, output_text
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self.get_tokenizer()
__magic_name__ , __magic_name__ : Optional[int] =self.get_chinese_input_output_texts()
__magic_name__ : Any =tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , output_text.split() )
__magic_name__ : Any =tokens + [tokenizer.unk_token]
__magic_name__ : Union[str, Any] =[2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : str =self.get_rust_tokenizer()
__magic_name__ , __magic_name__ : Any =self.get_chinese_input_output_texts()
__magic_name__ : Any =tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , output_text.split() )
__magic_name__ : Optional[int] =tokens + [tokenizer.unk_token]
__magic_name__ : Any =[2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def A__ ( self :Tuple ):
'''simple docstring'''
pass
def A__ ( self :Dict ):
'''simple docstring'''
pass
def A__ ( self :Tuple ):
'''simple docstring'''
pass
| 21 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : UNetaDModel
_lowercase : ScoreSdeVeScheduler
def __init__( self : Union[str, Any] , UpperCamelCase__ : UNetaDModel , UpperCamelCase__ : ScoreSdeVeScheduler):
'''simple docstring'''
super().__init__()
self.register_modules(unet=UpperCamelCase__ , scheduler=UpperCamelCase__)
@torch.no_grad()
def __call__( self : Union[str, Any] , UpperCamelCase__ : int = 1 , UpperCamelCase__ : int = 2_0_0_0 , UpperCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase__ : Optional[str] = "pil" , UpperCamelCase__ : bool = True , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
snake_case__ = self.unet.config.sample_size
snake_case__ = (batch_size, 3, img_size, img_size)
snake_case__ = self.unet
snake_case__ = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__) * self.scheduler.init_noise_sigma
snake_case__ = sample.to(self.device)
self.scheduler.set_timesteps(UpperCamelCase__)
self.scheduler.set_sigmas(UpperCamelCase__)
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
snake_case__ = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device)
# correction step
for _ in range(self.scheduler.config.correct_steps):
snake_case__ = self.unet(UpperCamelCase__ , UpperCamelCase__).sample
snake_case__ = self.scheduler.step_correct(UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__).prev_sample
# prediction step
snake_case__ = model(UpperCamelCase__ , UpperCamelCase__).sample
snake_case__ = self.scheduler.step_pred(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__)
snake_case__ , snake_case__ = output.prev_sample, output.prev_sample_mean
snake_case__ = sample_mean.clamp(0 , 1)
snake_case__ = sample.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
snake_case__ = self.numpy_to_pil(UpperCamelCase__)
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=UpperCamelCase__)
| 654 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A ( _a ):
lowercase_ = 42
class A ( nn.Module ):
def __init__( self : str , lowerCAmelCase_ : Optional[Any]=3 , lowerCAmelCase_ : Optional[Any]=3 , lowerCAmelCase_ : Dict=("DownEncoderBlock2D",) , lowerCAmelCase_ : int=(64,) , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : Optional[int]=32 , lowerCAmelCase_ : int="silu" , lowerCAmelCase_ : Optional[Any]=True , ) -> List[str]:
"""simple docstring"""
super().__init__()
_a = layers_per_block
_a = torch.nn.Convad(
lowerCAmelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_a = None
_a = nn.ModuleList([] )
# down
_a = block_out_channels[0]
for i, down_block_type in enumerate(lowerCAmelCase_ ):
_a = output_channel
_a = block_out_channels[i]
_a = i == len(lowerCAmelCase_ ) - 1
_a = get_down_block(
lowerCAmelCase_ , num_layers=self.layers_per_block , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCAmelCase_ , resnet_groups=lowerCAmelCase_ , attention_head_dim=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , )
self.down_blocks.append(lowerCAmelCase_ )
# mid
_a = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift='''default''' , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , )
# out
_a = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCAmelCase_ , eps=1e-6 )
_a = nn.SiLU()
_a = 2 * out_channels if double_z else out_channels
_a = nn.Convad(block_out_channels[-1] , lowerCAmelCase_ , 3 , padding=1 )
_a = False
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : int ) -> str:
"""simple docstring"""
_a = x
_a = self.conv_in(lowerCAmelCase_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCAmelCase_ : Tuple ):
def custom_forward(*lowerCAmelCase_ : Tuple ):
return module(*lowerCAmelCase_ )
return custom_forward
# down
if is_torch_version('''>=''' , '''1.11.0''' ):
for down_block in self.down_blocks:
_a = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
# middle
_a = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
else:
for down_block in self.down_blocks:
_a = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ )
# middle
_a = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCAmelCase_ )
else:
# down
for down_block in self.down_blocks:
_a = down_block(lowerCAmelCase_ )
# middle
_a = self.mid_block(lowerCAmelCase_ )
# post-process
_a = self.conv_norm_out(lowerCAmelCase_ )
_a = self.conv_act(lowerCAmelCase_ )
_a = self.conv_out(lowerCAmelCase_ )
return sample
class A ( nn.Module ):
def __init__( self : int , lowerCAmelCase_ : Optional[Any]=3 , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : Union[str, Any]=("UpDecoderBlock2D",) , lowerCAmelCase_ : List[str]=(64,) , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Optional[int]=32 , lowerCAmelCase_ : Union[str, Any]="silu" , lowerCAmelCase_ : Dict="group" , ) -> Dict:
"""simple docstring"""
super().__init__()
_a = layers_per_block
_a = nn.Convad(
lowerCAmelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_a = None
_a = nn.ModuleList([] )
_a = in_channels if norm_type == '''spatial''' else None
# mid
_a = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , )
# up
_a = list(reversed(lowerCAmelCase_ ) )
_a = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCAmelCase_ ):
_a = output_channel
_a = reversed_block_out_channels[i]
_a = i == len(lowerCAmelCase_ ) - 1
_a = get_up_block(
lowerCAmelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , prev_output_channel=lowerCAmelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCAmelCase_ , resnet_groups=lowerCAmelCase_ , attention_head_dim=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , resnet_time_scale_shift=lowerCAmelCase_ , )
self.up_blocks.append(lowerCAmelCase_ )
_a = output_channel
# out
if norm_type == "spatial":
_a = SpatialNorm(block_out_channels[0] , lowerCAmelCase_ )
else:
_a = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCAmelCase_ , eps=1e-6 )
_a = nn.SiLU()
_a = nn.Convad(block_out_channels[0] , lowerCAmelCase_ , 3 , padding=1 )
_a = False
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any]=None ) -> Any:
"""simple docstring"""
_a = z
_a = self.conv_in(lowerCAmelCase_ )
_a = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCAmelCase_ : Dict ):
def custom_forward(*lowerCAmelCase_ : Tuple ):
return module(*lowerCAmelCase_ )
return custom_forward
if is_torch_version('''>=''' , '''1.11.0''' ):
# middle
_a = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase_ , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
_a = sample.to(lowerCAmelCase_ )
# up
for up_block in self.up_blocks:
_a = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
else:
# middle
_a = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase_ , lowerCAmelCase_ )
_a = sample.to(lowerCAmelCase_ )
# up
for up_block in self.up_blocks:
_a = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , lowerCAmelCase_ )
else:
# middle
_a = self.mid_block(lowerCAmelCase_ , lowerCAmelCase_ )
_a = sample.to(lowerCAmelCase_ )
# up
for up_block in self.up_blocks:
_a = up_block(lowerCAmelCase_ , lowerCAmelCase_ )
# post-process
if latent_embeds is None:
_a = self.conv_norm_out(lowerCAmelCase_ )
else:
_a = self.conv_norm_out(lowerCAmelCase_ , lowerCAmelCase_ )
_a = self.conv_act(lowerCAmelCase_ )
_a = self.conv_out(lowerCAmelCase_ )
return sample
class A ( nn.Module ):
def __init__( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Tuple="random" , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[Any]=True ) -> List[Any]:
"""simple docstring"""
super().__init__()
_a = n_e
_a = vq_embed_dim
_a = beta
_a = legacy
_a = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_a = remap
if self.remap is not None:
self.register_buffer('''used''' , torch.tensor(np.load(self.remap ) ) )
_a = self.used.shape[0]
_a = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_a = self.re_embed
_a = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
_a = n_e
_a = sane_index_shape
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : List[Any] ) -> List[str]:
"""simple docstring"""
_a = inds.shape
assert len(lowerCAmelCase_ ) > 1
_a = inds.reshape(ishape[0] , -1 )
_a = self.used.to(lowerCAmelCase_ )
_a = (inds[:, :, None] == used[None, None, ...]).long()
_a = match.argmax(-1 )
_a = match.sum(2 ) < 1
if self.unknown_index == "random":
_a = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_a = self.unknown_index
return new.reshape(lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : Optional[Any] ) -> str:
"""simple docstring"""
_a = inds.shape
assert len(lowerCAmelCase_ ) > 1
_a = inds.reshape(ishape[0] , -1 )
_a = self.used.to(lowerCAmelCase_ )
if self.re_embed > self.used.shape[0]: # extra token
_a = 0 # simply set to zero
_a = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCAmelCase_ )
return back.reshape(lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Dict ) -> int:
"""simple docstring"""
_a = z.permute(0 , 2 , 3 , 1 ).contiguous()
_a = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_a = torch.argmin(torch.cdist(lowerCAmelCase_ , self.embedding.weight ) , dim=1 )
_a = self.embedding(lowerCAmelCase_ ).view(z.shape )
_a = None
_a = None
# compute loss for embedding
if not self.legacy:
_a = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_a = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_a = z + (z_q - z).detach()
# reshape back to match original input shape
_a = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_a = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_a = self.remap_to_used(lowerCAmelCase_ )
_a = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_a = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple ) -> List[Any]:
"""simple docstring"""
if self.remap is not None:
_a = indices.reshape(shape[0] , -1 ) # add batch axis
_a = self.unmap_to_all(lowerCAmelCase_ )
_a = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_a = self.embedding(lowerCAmelCase_ )
if shape is not None:
_a = z_q.view(lowerCAmelCase_ )
# reshape back to match original input shape
_a = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class A ( _a ):
def __init__( self : str , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple=False ) -> Optional[Any]:
"""simple docstring"""
_a = parameters
_a , _a = torch.chunk(lowerCAmelCase_ , 2 , dim=1 )
_a = torch.clamp(self.logvar , -3_0.0 , 2_0.0 )
_a = deterministic
_a = torch.exp(0.5 * self.logvar )
_a = torch.exp(self.logvar )
if self.deterministic:
_a = _a = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : Optional[torch.Generator] = None ) -> torch.FloatTensor:
"""simple docstring"""
_a = randn_tensor(
self.mean.shape , generator=lowerCAmelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
_a = self.mean + self.std * sample
return x
def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : List[str]=None ) -> int:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str]=[1, 2, 3] ) -> Any:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
_a = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCAmelCase_ )
def __lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
return self.mean
| 22 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
_lowercase : Optional[int] = IFInpaintingSuperResolutionPipeline
_lowercase : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
_lowercase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
_lowercase : int = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __magic_name__ ( self : Union[str, Any]):
'''simple docstring'''
return self._get_superresolution_dummy_components()
def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int]=0):
'''simple docstring'''
if str(UpperCamelCase__).startswith("""mps"""):
snake_case__ = torch.manual_seed(UpperCamelCase__)
else:
snake_case__ = torch.Generator(device=UpperCamelCase__).manual_seed(UpperCamelCase__)
snake_case__ = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
snake_case__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
snake_case__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
snake_case__ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __magic_name__ ( self : Dict):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
def __magic_name__ ( self : int):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""")
def __magic_name__ ( self : Optional[Any]):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1)
def __magic_name__ ( self : List[Any]):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def __magic_name__ ( self : Union[str, Any]):
'''simple docstring'''
self._test_save_load_local()
def __magic_name__ ( self : str):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 654 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class _a ( datasets.BeamBasedBuilder ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> List[str]:
return datasets.DatasetInfo(
features=datasets.Features({'content': datasets.Value('string' )} ) , supervised_keys=_UpperCAmelCase , )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_dummy_examples()} )]
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_UpperCAmelCase )
class _a ( datasets.BeamBasedBuilder ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Any:
return datasets.DatasetInfo(
features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) , supervised_keys=_UpperCAmelCase , )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_nested_examples()} )
]
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_UpperCAmelCase )
def _snake_case ():
return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'])]
def _snake_case ():
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'])]
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
@require_beam
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCamelCase_ = DummyBeamDataset(cache_dir=_UpperCAmelCase , beam_runner='DirectRunner' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
UpperCamelCase_ = builder.as_dataset()
self.assertEqual(dset['train'].num_rows , _UpperCAmelCase )
self.assertEqual(dset['train'].info.splits['train'].num_examples , _UpperCAmelCase )
self.assertDictEqual(dset['train'][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['train'][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
@require_beam
def _UpperCAmelCase ( self ) -> List[str]:
import apache_beam as beam
UpperCamelCase_ = beam.io.parquetio.WriteToParquet
UpperCamelCase_ = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCamelCase_ = DummyBeamDataset(cache_dir=_UpperCAmelCase , beam_runner='DirectRunner' )
with patch('apache_beam.io.parquetio.WriteToParquet' ) as write_parquet_mock:
UpperCamelCase_ = partial(_UpperCAmelCase , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
_UpperCAmelCase , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
_UpperCAmelCase , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
UpperCamelCase_ = builder.as_dataset()
self.assertEqual(dset['train'].num_rows , _UpperCAmelCase )
self.assertEqual(dset['train'].info.splits['train'].num_examples , _UpperCAmelCase )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['train']['content'] ) , sorted(['foo', 'bar', 'foobar'] ) )
self.assertTrue(
os.path.exists(os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
@require_beam
def _UpperCAmelCase ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCamelCase_ = DummyBeamDataset(cache_dir=_UpperCAmelCase )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def _UpperCAmelCase ( self ) -> Union[str, Any]:
    """Prepare a Beam dataset with nested (sequence) features and check the
    Arrow file layout, features, row count and example contents.

    NOTE(review): local names are mangled — results are bound to
    ``UpperCamelCase_`` but read back as ``builder``/``dset``/
    ``expected_num_examples``, and ``_UpperCAmelCase`` is used where
    ``tmp_cache_dir`` was intended — the test cannot run as written.
    """
    UpperCamelCase_ = len(get_test_nested_examples() )
    with tempfile.TemporaryDirectory() as tmp_cache_dir:
        UpperCamelCase_ = NestedBeamDataset(cache_dir=_UpperCAmelCase , beam_runner='DirectRunner' )
        builder.download_and_prepare()
        self.assertTrue(
            os.path.exists(
                os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train.arrow""" ) ) )
        self.assertDictEqual(
            builder.info.features , datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) )
        UpperCamelCase_ = builder.as_dataset()
        self.assertEqual(dset['train'].num_rows , _UpperCAmelCase )
        self.assertEqual(dset['train'].info.splits['train'].num_examples , _UpperCAmelCase )
        self.assertDictEqual(dset['train'][0] , get_test_nested_examples()[0][1] )
        self.assertDictEqual(
            dset['train'][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
        self.assertTrue(
            os.path.exists(os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
        del dset
| 23 |
# Project Euler problem 145: count the "reversible" numbers below 10**max_power.
# A number n (with no leading zero) is reversible when every digit of
# n + reverse(n) is odd.
#
# Fixes vs. the previous revision: both digit lists were bound to the same
# name (the second assignment clobbered the first), the chosen digits were
# never written into the `digits` buffer, and the functions/constants were
# defined under names different from the ones they are called by.
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count the reversible numbers with exactly ``length`` digits.

    Digit positions are fixed in symmetric pairs from the middle outwards.

    :param remaining_length: number of digit positions not yet fixed
    :param remainder: carry/sum contributed by the already-fixed digit pairs
    :param digits: scratch buffer of chosen digits (mutated in place)
    :param length: total number of digits of the candidate numbers
    :return: count of reversible numbers consistent with the choices so far
    """
    if remaining_length == 0:
        # All digits chosen: reject leading/trailing zero, then replay the
        # pairwise sums to verify every digit of n + reverse(n) is odd.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Odd length: the middle digit is added to itself (always even), so
        # the incoming carry must be odd for the middle sum digit to be odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    # General case: fix the next symmetric pair with opposite parities so
    # the pair sum (plus carry) yields an odd digit.
    result = 0
    for digita in range(10):
        digits[(length + remaining_length) // 2 - 1] = digita
        if (remainder + digita) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digitb in other_parity_digits:
            digits[(length - remaining_length) // 2] = digitb
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digita + digitb) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Return how many reversible numbers there are below 10**max_power.

    >>> solution(3)
    120
    """
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(F'''{solution() = }''')
| 654 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    """Build a ``WavaVecaForSequenceClassification`` model from an s3prl
    downstream state dict.

    Fixes vs. the previous revision: all four parameters shared one name
    (a SyntaxError) and every weight was assigned to a throwaway local
    instead of the model's parameters; the function is now defined under
    the name the dispatcher below actually calls.

    :param base_model_name: name/path of the pretrained wav2vec2 backbone
    :param hf_config: config for the target classification model
    :param downstream_dict: s3prl "Downstream" state dict holding head weights
    :return: the model with projector/classifier weights loaded
    """
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['''projector.weight''']
    model.projector.bias.data = downstream_dict['''projector.bias''']
    model.classifier.weight.data = downstream_dict['''model.post_net.linear.weight''']
    model.classifier.bias.data = downstream_dict['''model.post_net.linear.bias''']
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Build a ``WavaVecaForAudioFrameClassification`` (diarization) model
    from an s3prl downstream state dict.

    Fixes vs. the previous revision: duplicate parameter names (SyntaxError)
    and weight assignments discarded into a single local; renamed to the
    name the dispatcher calls.

    :param base_model_name: name/path of the pretrained wav2vec2 backbone
    :param hf_config: config for the target frame-classification model
    :param downstream_dict: s3prl "Downstream" state dict holding head weights
    :return: the model with classifier weights loaded
    """
    model = WavaVecaForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict['''model.linear.weight''']
    model.classifier.bias.data = downstream_dict['''model.linear.bias''']
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Build a ``WavaVecaForXVector`` (speaker verification) model from an
    s3prl downstream state dict.

    Fixes vs. the previous revision: duplicate parameter names (SyntaxError)
    and every weight discarded into one local; renamed to the name the
    dispatcher calls. The mapping of downstream keys to model sub-modules
    (projector / tdnn / feature_extractor / classifier / objective) follows
    the s3prl x-vector layout — confirm against a real checkpoint.

    :param base_model_name: name/path of the pretrained wav2vec2 backbone
    :param hf_config: config for the target x-vector model (uses ``tdnn_kernel``)
    :param downstream_dict: s3prl "Downstream" state dict holding head weights
    :return: the model with all x-vector head weights loaded
    """
    model = WavaVecaForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['''connector.weight''']
    model.projector.bias.data = downstream_dict['''connector.bias''']
    # One TDNN layer per configured kernel size.
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
    model.feature_extractor.weight.data = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
    model.feature_extractor.bias.data = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
    model.classifier.weight.data = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
    model.classifier.bias.data = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
    model.objective.weight.data = downstream_dict['''objective.W''']
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an s3prl downstream checkpoint into a HuggingFace model +
    feature extractor and save both to ``model_dump_path``.

    Fixes vs. the previous revision: all four parameters shared one name
    (a SyntaxError), every intermediate was discarded into one local, and
    the function was not defined under the name the CLI below calls.

    :param base_model_name: name/path of the pretrained wav2vec2 backbone
    :param config_path: path/name of the HF classifier config
    :param checkpoint_path: path to the s3prl checkpoint file
    :param model_dump_path: directory the converted model is written to
    :raises NotImplementedError: for architectures other than sequence
        classification, audio-frame classification, or x-vector
    """
    checkpoint = torch.load(checkpoint_path, map_location='''cpu''')
    downstream_dict = checkpoint['''Downstream''']

    hf_config = WavaVecaConfig.from_pretrained(config_path)
    # s3prl extracts features with attention mask and without normalization.
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    # Dispatch on the architecture declared in the config.
    arch = hf_config.architectures[0]
    if arch.endswith('''ForSequenceClassification'''):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('''ForAudioFrameClassification'''):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('''ForXVector'''):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''')

    if hf_config.use_weighted_layer_sum:
        # s3prl stores the layer-sum weights under the "Featurizer" key.
        hf_model.layer_weights.data = checkpoint['''Featurizer''']['''weights''']

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    # CLI entry point: parse the four required names/paths and run the
    # conversion. (Previous revision bound the parser and parsed args to a
    # throwaway name while reading `parser`/`args`.)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
    )
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
    parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    args = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 24 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
# Mapping from plain-English language names to NLLB-200 (FLORES-200)
# language codes. Renamed from a throwaway binding: the translation tool
# below reads this table as `LANGUAGE_CODES`.
LANGUAGE_CODES = {
    "Acehnese Arabic": "ace_Arab",
    "Acehnese Latin": "ace_Latn",
    "Mesopotamian Arabic": "acm_Arab",
    "Ta'izzi-Adeni Arabic": "acq_Arab",
    "Tunisian Arabic": "aeb_Arab",
    "Afrikaans": "afr_Latn",
    "South Levantine Arabic": "ajp_Arab",
    "Akan": "aka_Latn",
    "Amharic": "amh_Ethi",
    "North Levantine Arabic": "apc_Arab",
    "Modern Standard Arabic": "arb_Arab",
    "Modern Standard Arabic Romanized": "arb_Latn",
    "Najdi Arabic": "ars_Arab",
    "Moroccan Arabic": "ary_Arab",
    "Egyptian Arabic": "arz_Arab",
    "Assamese": "asm_Beng",
    "Asturian": "ast_Latn",
    "Awadhi": "awa_Deva",
    "Central Aymara": "ayr_Latn",
    "South Azerbaijani": "azb_Arab",
    "North Azerbaijani": "azj_Latn",
    "Bashkir": "bak_Cyrl",
    "Bambara": "bam_Latn",
    "Balinese": "ban_Latn",
    "Belarusian": "bel_Cyrl",
    "Bemba": "bem_Latn",
    "Bengali": "ben_Beng",
    "Bhojpuri": "bho_Deva",
    "Banjar Arabic": "bjn_Arab",
    "Banjar Latin": "bjn_Latn",
    "Standard Tibetan": "bod_Tibt",
    "Bosnian": "bos_Latn",
    "Buginese": "bug_Latn",
    "Bulgarian": "bul_Cyrl",
    "Catalan": "cat_Latn",
    "Cebuano": "ceb_Latn",
    "Czech": "ces_Latn",
    "Chokwe": "cjk_Latn",
    "Central Kurdish": "ckb_Arab",
    "Crimean Tatar": "crh_Latn",
    "Welsh": "cym_Latn",
    "Danish": "dan_Latn",
    "German": "deu_Latn",
    "Southwestern Dinka": "dik_Latn",
    "Dyula": "dyu_Latn",
    "Dzongkha": "dzo_Tibt",
    "Greek": "ell_Grek",
    "English": "eng_Latn",
    "Esperanto": "epo_Latn",
    "Estonian": "est_Latn",
    "Basque": "eus_Latn",
    "Ewe": "ewe_Latn",
    "Faroese": "fao_Latn",
    "Fijian": "fij_Latn",
    "Finnish": "fin_Latn",
    "Fon": "fon_Latn",
    "French": "fra_Latn",
    "Friulian": "fur_Latn",
    "Nigerian Fulfulde": "fuv_Latn",
    "Scottish Gaelic": "gla_Latn",
    "Irish": "gle_Latn",
    "Galician": "glg_Latn",
    "Guarani": "grn_Latn",
    "Gujarati": "guj_Gujr",
    "Haitian Creole": "hat_Latn",
    "Hausa": "hau_Latn",
    "Hebrew": "heb_Hebr",
    "Hindi": "hin_Deva",
    "Chhattisgarhi": "hne_Deva",
    "Croatian": "hrv_Latn",
    "Hungarian": "hun_Latn",
    "Armenian": "hye_Armn",
    "Igbo": "ibo_Latn",
    "Ilocano": "ilo_Latn",
    "Indonesian": "ind_Latn",
    "Icelandic": "isl_Latn",
    "Italian": "ita_Latn",
    "Javanese": "jav_Latn",
    "Japanese": "jpn_Jpan",
    "Kabyle": "kab_Latn",
    "Jingpho": "kac_Latn",
    "Kamba": "kam_Latn",
    "Kannada": "kan_Knda",
    "Kashmiri Arabic": "kas_Arab",
    "Kashmiri Devanagari": "kas_Deva",
    "Georgian": "kat_Geor",
    "Central Kanuri Arabic": "knc_Arab",
    "Central Kanuri Latin": "knc_Latn",
    "Kazakh": "kaz_Cyrl",
    "Kabiyè": "kbp_Latn",
    "Kabuverdianu": "kea_Latn",
    "Khmer": "khm_Khmr",
    "Kikuyu": "kik_Latn",
    "Kinyarwanda": "kin_Latn",
    "Kyrgyz": "kir_Cyrl",
    "Kimbundu": "kmb_Latn",
    "Northern Kurdish": "kmr_Latn",
    "Kikongo": "kon_Latn",
    "Korean": "kor_Hang",
    "Lao": "lao_Laoo",
    "Ligurian": "lij_Latn",
    "Limburgish": "lim_Latn",
    "Lingala": "lin_Latn",
    "Lithuanian": "lit_Latn",
    "Lombard": "lmo_Latn",
    "Latgalian": "ltg_Latn",
    "Luxembourgish": "ltz_Latn",
    "Luba-Kasai": "lua_Latn",
    "Ganda": "lug_Latn",
    "Luo": "luo_Latn",
    "Mizo": "lus_Latn",
    "Standard Latvian": "lvs_Latn",
    "Magahi": "mag_Deva",
    "Maithili": "mai_Deva",
    "Malayalam": "mal_Mlym",
    "Marathi": "mar_Deva",
    "Minangkabau Arabic ": "min_Arab",
    "Minangkabau Latin": "min_Latn",
    "Macedonian": "mkd_Cyrl",
    "Plateau Malagasy": "plt_Latn",
    "Maltese": "mlt_Latn",
    "Meitei Bengali": "mni_Beng",
    "Halh Mongolian": "khk_Cyrl",
    "Mossi": "mos_Latn",
    "Maori": "mri_Latn",
    "Burmese": "mya_Mymr",
    "Dutch": "nld_Latn",
    "Norwegian Nynorsk": "nno_Latn",
    "Norwegian Bokmål": "nob_Latn",
    "Nepali": "npi_Deva",
    "Northern Sotho": "nso_Latn",
    "Nuer": "nus_Latn",
    "Nyanja": "nya_Latn",
    "Occitan": "oci_Latn",
    "West Central Oromo": "gaz_Latn",
    "Odia": "ory_Orya",
    "Pangasinan": "pag_Latn",
    "Eastern Panjabi": "pan_Guru",
    "Papiamento": "pap_Latn",
    "Western Persian": "pes_Arab",
    "Polish": "pol_Latn",
    "Portuguese": "por_Latn",
    "Dari": "prs_Arab",
    "Southern Pashto": "pbt_Arab",
    "Ayacucho Quechua": "quy_Latn",
    "Romanian": "ron_Latn",
    "Rundi": "run_Latn",
    "Russian": "rus_Cyrl",
    "Sango": "sag_Latn",
    "Sanskrit": "san_Deva",
    "Santali": "sat_Olck",
    "Sicilian": "scn_Latn",
    "Shan": "shn_Mymr",
    "Sinhala": "sin_Sinh",
    "Slovak": "slk_Latn",
    "Slovenian": "slv_Latn",
    "Samoan": "smo_Latn",
    "Shona": "sna_Latn",
    "Sindhi": "snd_Arab",
    "Somali": "som_Latn",
    "Southern Sotho": "sot_Latn",
    "Spanish": "spa_Latn",
    "Tosk Albanian": "als_Latn",
    "Sardinian": "srd_Latn",
    "Serbian": "srp_Cyrl",
    "Swati": "ssw_Latn",
    "Sundanese": "sun_Latn",
    "Swedish": "swe_Latn",
    "Swahili": "swh_Latn",
    "Silesian": "szl_Latn",
    "Tamil": "tam_Taml",
    "Tatar": "tat_Cyrl",
    "Telugu": "tel_Telu",
    "Tajik": "tgk_Cyrl",
    "Tagalog": "tgl_Latn",
    "Thai": "tha_Thai",
    "Tigrinya": "tir_Ethi",
    "Tamasheq Latin": "taq_Latn",
    "Tamasheq Tifinagh": "taq_Tfng",
    "Tok Pisin": "tpi_Latn",
    "Tswana": "tsn_Latn",
    "Tsonga": "tso_Latn",
    "Turkmen": "tuk_Latn",
    "Tumbuka": "tum_Latn",
    "Turkish": "tur_Latn",
    "Twi": "twi_Latn",
    "Central Atlas Tamazight": "tzm_Tfng",
    "Uyghur": "uig_Arab",
    "Ukrainian": "ukr_Cyrl",
    "Umbundu": "umb_Latn",
    "Urdu": "urd_Arab",
    "Northern Uzbek": "uzn_Latn",
    "Venetian": "vec_Latn",
    "Vietnamese": "vie_Latn",
    "Waray": "war_Latn",
    "Wolof": "wol_Latn",
    "Xhosa": "xho_Latn",
    "Eastern Yiddish": "ydd_Hebr",
    "Yoruba": "yor_Latn",
    "Yue Chinese": "yue_Hant",
    "Chinese Simplified": "zho_Hans",
    "Chinese Traditional": "zho_Hant",
    "Standard Malay": "zsm_Latn",
    "Zulu": "zul_Latn",
}
class _lowerCAmelCase(PipelineTool):
    """Tool that translates text between languages with the NLLB-200 model.

    Fixes vs. the previous revision: the base class was an undefined name
    (``PipelineTool`` is the import this file provides); every class
    attribute was bound to one shadowed name; all three methods shared one
    name, so the ``encode``/``forward``/``decode`` contract used by
    ``PipelineTool`` was never satisfied; and ``skip_special_tokens`` was
    bound to the outputs argument instead of ``True``.
    """

    default_checkpoint = '''facebook/nllb-200-distilled-600M'''
    description = (
        '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
        '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
        '''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '''
        '''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
    )
    name = '''translator'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['''text''', '''text''', '''text''']
    outputs = ['''text''']

    def encode(self, text, src_lang, tgt_lang):
        """Map plain-English language names to NLLB codes and tokenize.

        :raises ValueError: if either language name is not in ``lang_to_code``
        """
        if src_lang not in self.lang_to_code:
            raise ValueError(F'''{src_lang} is not a supported language.''')
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F'''{tgt_lang} is not a supported language.''')
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="""pt""", src_lang=src_lang, tgt_lang=tgt_lang)

    def forward(self, inputs):
        """Run generation on the tokenized inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode the first generated sequence back to plain text."""
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 654 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class _UpperCamelCase(unittest.TestCase):
    """Tests for ``transformers.activations``.

    Fixes vs. the previous revision: every local was bound to one shadowed
    name while being read back under its intended name (``torch_builtin``,
    ``y_gelu`` …), ``assertRaises`` was given an undefined name instead of
    an exception type, and each method shadowed the previous one; trailing
    non-Python residue at the end of the block was removed.
    """

    def test_gelu_versions(self):
        """gelu_python matches the builtin gelu but differs from gelu_new."""
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        """gelu_10 clips at 10 and agrees with gelu below the clip point."""
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        geluaa = get_activation("gelu_10")
        y_gelu = torch_builtin(x)
        y_gelu_aa = geluaa(x)
        # Mask of positions that were NOT clipped.
        clipped_mask = torch.where(y_gelu_aa < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_aa).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_aa * clipped_mask))

    def test_get_activation(self):
        """All registered activation names resolve; unknown ones raise."""
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        """Each lookup returns a distinct object: attributes don't leak."""
        acta = get_activation("gelu")
        acta.a = 1
        actb = get_activation("gelu")
        self.assertEqual(acta.a, 1)
        with self.assertRaises(AttributeError):
            _ = actb.a
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    """Return a representative linear layer from the model's first MLP block.

    GPT-2 names the layer ``c_fc``; the BLOOM-style fallback uses
    ``dense_ah_to_h``. (Renamed from a mangled identifier — the tests below
    call this helper as ``get_some_linear_layer``.)

    :param model: a causal-LM model exposing ``config`` and ``transformer.h``
    :return: the selected linear sub-module
    """
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear module with a low-rank (LoRA-style) adapter.

    The wrapped module is left untouched; a two-layer bottleneck adapter is
    added and its output is summed with the module's output. The second
    adapter layer is zero-initialised, so the wrapper is an exact no-op at
    initialisation.

    Fixes vs. the previous revision: renamed from a mangled identifier (the
    training test below instantiates ``LoRALayer``); the adapter linears
    passed ``rank`` as the ``bias`` flag (silently enabling biases) instead
    of ``bias=False``; and the forward pass was not named ``forward``, so
    ``nn.Module.__call__`` dispatch never reached it.
    """

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        # Bias-free bottleneck: in_features -> rank -> out_features.
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        # Zero-init the second layer so the adapter contributes nothing at start.
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        """Wrapped module's output plus the adapter's output."""
        return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCAmelCase ( unittest.TestCase ):
    """Base fixture for the 4-bit bitsandbytes tests: model id, expected
    fp16/4-bit memory ratio, prompt, accepted generations, and token budget.

    NOTE(review): attribute names are mangled — ``_lowercase`` is assigned
    repeatedly so earlier values are shadowed, and ``EXPECTED_OUTPUTS`` is
    added to before ever being bound under that name; subclasses read
    ``model_name``/``EXPECTED_RELATIVE_DIFFERENCE``/``input_text``/
    ``MAX_NEW_TOKENS`` which are never actually set here.
    """
    _lowercase : Dict = '''bigscience/bloom-1b7'''
    # Constant values
    _lowercase : Any = 2.109_6595_5269_2574
    _lowercase : Tuple = '''Hello my name is'''
    _lowercase : List[Any] = set()
    EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
    EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
    EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
    _lowercase : List[str] = 10

    def __magic_name__ ( self : Optional[int]):
        '''Build the tokenizer shared by the subclasses (presumably ``setUp``).'''
        # NOTE(review): result is discarded into a throwaway local; tests read
        # `self.tokenizer`, which is never set here.
        snake_case__ = AutoTokenizer.from_pretrained(self.model_name)
class _lowerCAmelCase ( lowercase_ ):
    """4-bit quantisation tests: config round-trip, memory footprint, dtype
    of quantised linears, generation quality, and operations that must fail
    on a quantised model.

    NOTE(review): local names are mangled throughout — values are bound to
    ``snake_case__`` but read back under their intended names (``config``,
    ``mem_fpaa``/``mem_abit``, ``linear``, ``encoded_input``,
    ``output_sequences``, ``model_abit_from_config``, ``model``), and
    ``UpperCamelCase__`` is used where booleans/exception types were
    intended. These tests cannot run until the names are reconciled.
    """

    def __magic_name__ ( self : str):
        '''Load the fp16 reference model and the 4-bit quantised model.'''
        super().setUp()
        # Models and tokenizer
        snake_case__ = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map="""auto""")
        snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")

    def __magic_name__ ( self : Tuple):
        '''Free both models and release GPU memory between tests.'''
        del self.model_fpaa
        del self.model_abit
        gc.collect()
        torch.cuda.empty_cache()

    def __magic_name__ ( self : str):
        '''The quantised model carries a serialisable quantization_config.'''
        snake_case__ = self.model_abit.config
        self.assertTrue(hasattr(UpperCamelCase__ , """quantization_config"""))
        snake_case__ = config.to_dict()
        snake_case__ = config.to_diff_dict()
        snake_case__ = config.to_json_string()

    def __magic_name__ ( self : Dict):
        '''4-bit model is smaller by the expected ratio and uses Params4bit.'''
        from bitsandbytes.nn import Paramsabit

        snake_case__ = self.model_fpaa.get_memory_footprint()
        snake_case__ = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE)
        snake_case__ = get_some_linear_layer(self.model_abit)
        self.assertTrue(linear.weight.__class__ == Paramsabit)

    def __magic_name__ ( self : Optional[int]):
        '''Every quantised linear (except kept-in-fp32 ones) stores uint8.'''
        from transformers import TaPreTrainedModel

        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()
        for name, module in self.model_abit.named_modules():
            if isinstance(UpperCamelCase__ , torch.nn.Linear):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta)

    def __magic_name__ ( self : Dict):
        '''Generation from the 4-bit model stays in the accepted output set.'''
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        snake_case__ = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS)

    def __magic_name__ ( self : str):
        '''Same generation check when quantising via an explicit config.'''
        snake_case__ = BitsAndBytesConfig()
        snake_case__ = True
        snake_case__ = AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=UpperCamelCase__ , device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        snake_case__ = model_abit_from_config.generate(
            input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS)

    def __magic_name__ ( self : Optional[int]):
        '''Saving a 4-bit model must raise (serialisation unsupported).'''
        with self.assertRaises(UpperCamelCase__), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(UpperCamelCase__)

    def __magic_name__ ( self : List[str]):
        '''Passing both a quantization config and load_in_4bit must raise.'''
        snake_case__ = BitsAndBytesConfig()
        with self.assertRaises(UpperCamelCase__):
            snake_case__ = AutoModelForCausalLM.from_pretrained(
                self.model_name , quantization_config=UpperCamelCase__ , load_in_abit=UpperCamelCase__ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )

    def __magic_name__ ( self : List[Any]):
        '''Device/dtype casts of a 4-bit model raise; fp16 model still casts.'''
        with self.assertRaises(UpperCamelCase__):
            # Tries with `str`
            self.model_abit.to("""cpu""")
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `dtype``
            self.model_abit.to(torch.floataa)
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `device`
            self.model_abit.to(torch.device("""cuda:0"""))
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `device`
            self.model_abit.float()
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `device`
            self.model_abit.half()
        # Test if we did not break anything
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        snake_case__ = self.model_fpaa.to(torch.floataa)
        snake_case__ = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        # Check this does not throw an error
        snake_case__ = self.model_fpaa.to("""cpu""")
        # Check this does not throw an error
        snake_case__ = self.model_fpaa.half()
        # Check this does not throw an error
        snake_case__ = self.model_fpaa.float()

    def __magic_name__ ( self : Dict):
        '''T5 keeps its kept-in-fp32 modules in float32 after quantisation.'''
        snake_case__ = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCAmelCase ( unittest.TestCase ):
    """4-bit inference tests for T5 models, with and without the
    ``_keep_in_fp32_modules`` mechanism.

    NOTE(review): local names are mangled — values bound to ``snake_case__``
    are read back as ``model``/``modules``/class attributes
    (``model_name``, ``dense_act_model_name``, ``tokenizer``,
    ``input_text``) which are never actually set. Cannot run as written.
    """
    @classmethod
    def __magic_name__ ( cls : Optional[Any]):
        '''Class-level fixture: model names, tokenizer and prompt.'''
        snake_case__ = """t5-small"""
        snake_case__ = """google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
        snake_case__ = AutoTokenizer.from_pretrained(cls.model_name)
        snake_case__ = """Translate in German: Hello, my dog is cute"""

    def __magic_name__ ( self : Optional[int]):
        '''Release GPU memory between tests.'''
        gc.collect()
        torch.cuda.empty_cache()

    def __magic_name__ ( self : Any):
        '''Inference works even with _keep_in_fp32_modules disabled.'''
        from transformers import TaForConditionalGeneration

        # Save and temporarily clear the kept-in-fp32 module list.
        snake_case__ = TaForConditionalGeneration._keep_in_fpaa_modules
        snake_case__ = None
        # test with `t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
        # test with `flan-t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
        # Restore the saved module list.
        snake_case__ = modules

    def __magic_name__ ( self : Union[str, Any]):
        '''Standard 4-bit T5 inference; decoder linears must be Linear4bit.'''
        import bitsandbytes as bnb

        from transformers import TaForConditionalGeneration

        # test with `t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit))
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
        # test with `flan-t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
class _lowerCAmelCase ( lowercase_ ):
    """Checks 4-bit loading across model heads (base, sequence
    classification, causal LM, seq2seq): backbone linears are quantised
    while task heads stay plain ``nn.Parameter``.

    NOTE(review): local names are mangled — the four loaded models are all
    bound to ``snake_case__`` yet the tests read ``base_model``/
    ``sequence_model``/``model_abit``/``seq_to_seq_model``. Cannot run as
    written.
    """

    def __magic_name__ ( self : int):
        '''Load the four model variants in 4 bits.'''
        super().setUp()
        # model_name
        snake_case__ = """bigscience/bloom-560m"""
        snake_case__ = """t5-small"""
        # Different types of model
        snake_case__ = AutoModel.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        # Sequence classification model
        snake_case__ = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        # CausalLM model
        snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        # Seq2seq model
        snake_case__ = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")

    def __magic_name__ ( self : List[str]):
        '''Free all four models and release GPU memory.'''
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()

    def __magic_name__ ( self : Union[str, Any]):
        '''Backbone weights are Params4bit; task heads are nn.Parameter.'''
        from bitsandbytes.nn import Paramsabit

        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit)
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class _lowerCAmelCase ( lowercase_ ):
    """Runs a text-generation ``pipeline`` with 4-bit model kwargs and
    checks the generated text against the accepted output set.

    NOTE(review): the pipeline is bound to ``snake_case__`` but read back
    as ``self.pipe``/``pipeline_output`` — cannot run as written.
    """

    def __magic_name__ ( self : Tuple):
        '''Delegate to the base fixture.'''
        super().setUp()

    def __magic_name__ ( self : int):
        '''Drop the pipeline and release GPU memory.'''
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()

    def __magic_name__ ( self : Tuple):
        '''Pipeline with load_in_4bit kwargs produces an accepted generation.'''
        snake_case__ = pipeline(
            """text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
        # Real second forward pass
        snake_case__ = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class _lowerCAmelCase ( lowercase_ ):
    """Loads the 4-bit model with a ``balanced`` device map across two GPUs
    and checks that inference still works.

    NOTE(review): mangled locals — the model is bound to ``snake_case__``
    but used as ``model_parallel``/``output_parallel`` — cannot run as
    written.
    """

    def __magic_name__ ( self : Union[str, Any]):
        '''Delegate to the base fixture.'''
        super().setUp()

    def __magic_name__ ( self : int):
        '''Balanced device map spans GPUs 0 and 1; generation still works.'''
        snake_case__ = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=UpperCamelCase__ , device_map="""balanced""")
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()) , {0, 1})
        # Check that inference pass works on the model
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        # Second real batch
        snake_case__ = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS)
class _lowerCAmelCase ( lowercase_ ):
    """Adapter-training smoke test: freeze the 4-bit base model, wrap the
    attention projections with ``LoRALayer``, run a forward/backward pass
    and assert only the adapter weights receive gradients.

    NOTE(review): mangled names throughout — the model and batch are bound
    to ``snake_case__`` but read back as ``model``/``batch``; the LoRA
    wrappers are assigned to a throwaway name instead of the ``q_proj``/
    ``k_proj``/``v_proj`` attributes; and ``isinstance(UpperCamelCase__,
    UpperCamelCase__)`` compares undefined names where a ``LoRALayer``
    check was intended. Cannot run as written.
    """

    def __magic_name__ ( self : Any):
        '''Use OPT-350m for the training smoke test.'''
        snake_case__ = """facebook/opt-350m"""
        super().setUp()

    def __magic_name__ ( self : Any):
        '''Freeze the 4-bit model, attach adapters, and check gradients.'''
        if version.parse(importlib.metadata.version("""bitsandbytes""")) < version.parse("""0.37.0"""):
            return
        # Step 1: freeze all parameters
        snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__)
        self.assertEqual(set(model.hf_device_map.values()) , {torch.cuda.current_device()})
        for param in model.parameters():
            snake_case__ = False # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                snake_case__ = param.data.to(torch.floataa)
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(UpperCamelCase__)):
                snake_case__ = LoRALayer(module.q_proj , rank=1_6)
                snake_case__ = LoRALayer(module.k_proj , rank=1_6)
                snake_case__ = LoRALayer(module.v_proj , rank=1_6)
        # Step 3: dummy batch
        snake_case__ = self.tokenizer("""Test batch """ , return_tensors="""pt""").to(0)
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            snake_case__ = model.forward(**UpperCamelCase__)
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(UpperCamelCase__ , UpperCamelCase__):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(UpperCamelCase__ , nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class _lowerCAmelCase ( lowercase_ ):
    """Re-runs the inherited 4-bit tests against GPT-2 XL with its own
    expected fp16/4-bit memory ratio.

    NOTE(review): both attributes are bound to ``_lowercase`` so the second
    assignment shadows the first; presumably they were ``model_name`` and
    ``EXPECTED_RELATIVE_DIFFERENCE``.
    """
    _lowercase : List[Any] = '''gpt2-xl'''
    _lowercase : Any = 3.3191_8548_5415_2187
| 654 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for the UperNet model: the config is always
# importable; the modeling entries are only registered when torch is
# available.
#
# Fixes vs. the previous revision: the structure dict was bound to a
# throwaway name while being consumed as `_import_structure`; the
# torch-available branch clobbered the dict instead of adding the
# "modeling_upernet" key; and the lazy module was assigned to a local
# instead of being installed into sys.modules.
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy torch
    # imports are deferred until first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# NOTE(review): four distinct settings (originally along the lines of
# LABEL_DIR, IMAGE_DIR, OUTPUT_DIR and FLIP_TYPE) were collapsed onto the
# single name ``a__``, so only the last binding survives and later code that
# reads ``OUTPUT_DIR`` hits a NameError. Restore the original names.
a__ = """"""
a__ = """"""
a__ = """"""
a__ = 1  # (0 is vertical, 1 is horizontal)
def _UpperCAmelCase ( ):
    """Entry point: flip every dataset image and write image + label copies.

    NOTE(review): the body reads several names that are not defined in this
    module as written — ``a`` (call arguments lost in renaming),
    ``get_dataset``/``update_image_and_anno``/``random_chars`` (the helpers
    below were all renamed ``_UpperCAmelCase`` and shadow each other), and
    ``OUTPUT_DIR`` (collapsed onto ``a__``). Restore the original names
    before running.
    """
    snake_case__ , snake_case__ = get_dataset(a , a )
    print("""Processing...""" )
    snake_case__ , snake_case__ , snake_case__ = update_image_and_anno(a , a , a )
    for index, image in enumerate(a ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        snake_case__ = random_chars(32 )
        snake_case__ = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0]
        snake_case__ = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
        # NOTE(review): the leading '/' writes to the filesystem root — confirm.
        cva.imwrite(F'''/{file_root}.jpg''' , a , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(F'''Success {index+1}/{len(a )} with {file_name}''' )
        snake_case__ = []
        for anno in new_annos[index]:
            snake_case__ = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
            annos_list.append(a )
        with open(F'''/{file_root}.txt''' , """w""" ) as outfile:
            outfile.write("""\n""".join(line for line in annos_list ) )
def _UpperCAmelCase ( label_dir : str , img_dir : str ):
    """Collect YOLO-style label files and the paths of their matching images.

    FIX: the signature previously declared the parameter ``a`` twice (a
    SyntaxError) and the locals had lost their names.

    Args:
        label_dir: directory containing ``*.txt`` label files.
        img_dir: directory containing ``.jpg`` images named like the labels.

    Returns:
        Tuple ``(img_paths, labels)``; ``labels[i]`` is a list of
        ``[class_id, x_center, y_center, width, height]`` boxes for
        ``img_paths[i]``. Label files with no boxes are skipped entirely.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        # Basename without extension links label file to its image.
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),     # class id
                    float(obj[1]),   # x center (normalised)
                    float(obj[2]),   # y center (normalised)
                    float(obj[3]),   # width
                    float(obj[4]),   # height
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def _UpperCAmelCase ( img_list : list , anno_list : list , flip_type : int = 1 ):
    """Flip every image and mirror its bounding boxes accordingly.

    FIX: the signature previously declared ``a`` three times (a SyntaxError);
    the body already read ``img_list``/``anno_list``/``flip_type``.

    Args:
        img_list: list of image file paths.
        anno_list: per-image lists of ``[class, x_center, y_center, w, h]``
            boxes with coordinates normalised to [0, 1].
        flip_type: 1 flips horizontally (mirrors x), 0 flips vertically
            (mirrors y) — matching cv2.flip's flip codes.

    Returns:
        ``(new_imgs_list, new_annos_lists, path_list)``: flipped images,
        adjusted annotations and original paths, index-aligned.
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                # Horizontal flip mirrors the x centre around the midline.
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                # Vertical flip mirrors the y centre.
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def _UpperCAmelCase ( number_char : int = 32 ):
    """Return a random string of lowercase ASCII letters and digits.

    FIX: the parameter was renamed ``a`` while the body read ``number_char``,
    raising NameError on every call.

    Args:
        number_char: desired length; must be greater than 1.
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
    # NOTE(review): no function named ``main`` exists in this module (the
    # entry point above was renamed ``_UpperCAmelCase``), so this call raises
    # NameError as written.
    main()
    print("""DONE ✅""")
| 654 | 0 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> bool:
    """Return True iff the given non-negative integer is a perfect square."""
    # FIX: ``int(number ** 0.5)`` goes through a float and can mis-classify
    # squares above ~2**52; math.isqrt stays exact for arbitrarily large ints.
    from math import isqrt

    root = isqrt(_SCREAMING_SNAKE_CASE)
    return _SCREAMING_SNAKE_CASE == root * root
def __lowerCAmelCase( x_num , x_den , y_num , y_den , z_num , z_den ) -> tuple[int, int]:
    """Return x + y + z (as fractions) reduced to a (numerator, denominator) pair.

    FIX: the signature previously declared ``_SCREAMING_SNAKE_CASE`` six times
    (a SyntaxError); the body already read the names restored here.

    Args:
        x_num/x_den, y_num/y_den, z_num/z_den: the three fractions.

    Returns:
        ``(top, bottom)`` with gcd(top, bottom) divided out.
    """
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE = 35 ) -> int:
    """Enumerate fractions x, y and several derived values z, collect the
    distinct reduced z with 0 < z_num < z_den <= order into a set, sum them
    as Fractions and return numerator + denominator of the total.

    NOTE(review): as written this body is unrunnable — it reads ``order``
    (the parameter was renamed ``_SCREAMING_SNAKE_CASE``), calls ``add_three``
    and ``is_sq`` (the helpers above were both renamed ``__lowerCAmelCase``
    and shadow each other), and reads ``unique_s``/``total``/``hcf``/
    ``z_num``/``z_den`` whose assignment targets were all collapsed onto
    ``_A``. The bare ``_A = 42`` bindings appear to be leftovers of what were
    originally type annotations. Restore the original names before running.
    """
    _A = set()
    _A = 42
    _A = Fraction(0 )
    _A = 42
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    _A = x_num * y_den + x_den * y_num
                    _A = x_den * y_den
                    _A = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        _A = add_three(
                            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                        unique_s.add(_SCREAMING_SNAKE_CASE )
                    # n=2
                    _A = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    _A = x_den * x_den * y_den * y_den
                    if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ):
                        _A = int(sqrt(_SCREAMING_SNAKE_CASE ) )
                        _A = int(sqrt(_SCREAMING_SNAKE_CASE ) )
                        _A = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            _A = add_three(
                                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                            unique_s.add(_SCREAMING_SNAKE_CASE )
                    # n=-1
                    _A = x_num * y_num
                    _A = x_den * y_num + x_num * y_den
                    _A = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        _A = add_three(
                            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                        unique_s.add(_SCREAMING_SNAKE_CASE )
                    # n=2
                    _A = x_num * x_num * y_num * y_num
                    _A = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ):
                        _A = int(sqrt(_SCREAMING_SNAKE_CASE ) )
                        _A = int(sqrt(_SCREAMING_SNAKE_CASE ) )
                        _A = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            _A = add_three(
                                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                            unique_s.add(_SCREAMING_SNAKE_CASE )
    for num, den in unique_s:
        total += Fraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    return total.denominator + total.numerator
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined here — the function above was
    # renamed ``__lowerCAmelCase`` — so this raises NameError as written.
    print(f"{solution() = }")
| 27 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
# NOTE(review): three distinct constants (originally along the lines of
# SPEED_TEST_N_EXAMPLES, RESULTS_BASEPATH/RESULTS_FILENAME and RESULTS_FILE_PATH)
# were collapsed onto ``a__``, so the reads of RESULTS_BASEPATH and
# RESULTS_FILENAME below hit NameError. Restore the original names.
a__ = 5_0_0_0_0_0
a__ , a__ = os.path.split(__file__)
a__ = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def _UpperCAmelCase ( dataset : datasets.Dataset , **kwargs ):
    """Run ``Dataset.map`` with the given kwargs; timed by ``@get_duration``.

    FIX: the signature previously declared ``a`` both positionally and as
    ``**a`` (a SyntaxError). The mapped result is discarded — only the
    wall-clock time reported by the decorator matters.
    """
    _ = dataset.map(**kwargs)
@get_duration
def _UpperCAmelCase ( dataset : datasets.Dataset , **kwargs ):
    """Run ``Dataset.filter`` with the given kwargs; timed by ``@get_duration``.

    FIX: the signature previously declared ``a`` both positionally and as
    ``**a`` (a SyntaxError). The filtered result is discarded — only the
    wall-clock time reported by the decorator matters.
    """
    _ = dataset.filter(**kwargs)
def _UpperCAmelCase ( ):
    """Benchmark Dataset.map/.filter across output formats and dump timings as JSON.

    NOTE(review): unrunnable as written — the ``map``/``filter`` calls below hit
    the *builtins* (the timed helpers above were both renamed ``_UpperCAmelCase``
    and shadow each other), ``a`` stands in for every lost argument,
    the ``tokenize`` closure reads an undefined ``examples``, and the
    ``times``/``dataset``/``tokenizer`` assignment targets were collapsed onto
    ``snake_case__``. Restore the original names before running.
    """
    snake_case__ = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        snake_case__ = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
        snake_case__ = generate_example_dataset(
            os.path.join(a , """dataset.arrow""" ) , a , num_examples=a )
        snake_case__ = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=a )
        def tokenize(a : Union[str, Any] ):
            return tokenizer(examples["""text"""] )
        snake_case__ = map(a )
        snake_case__ = map(a , batched=a )
        snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""numpy""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""pandas""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        snake_case__ = map(a , function=a , batched=a )
        snake_case__ = filter(a )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(a , """wb""" ) as f:
            f.write(json.dumps(a ).encode("""utf-8""" ) )
if __name__ == "__main__":  # useful to run the profiler
    # NOTE(review): ``benchmark_map_filter`` is not defined — the entry point
    # above was renamed ``_UpperCAmelCase`` — so this raises NameError.
    benchmark_map_filter()
| 654 | 0 |
'''simple docstring'''
# Undirected demo graph as an adjacency-list dict.
# NOTE(review): the __main__ block below reads ``demo_graph``, but the dict is
# bound to ``UpperCamelCase_`` here — restore the original name before running.
UpperCamelCase_ = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
def lowercase__( graph: dict , start , goal ):
    """Breadth-first search for one shortest path from ``start`` to ``goal``.

    FIX: the signature previously declared ``__UpperCamelCase`` three times
    (a SyntaxError); the body already read ``graph``/``start``/``goal``.

    Args:
        graph: adjacency-list mapping of node -> list of neighbour nodes.
        start: node to search from.
        goal: node to reach.

    Returns:
        List of nodes from ``start`` to ``goal`` inclusive, or ``[]`` when no
        path exists.
    """
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def lowercase__( graph: dict , start , target ):
    """Return the number of edges on a shortest path from ``start`` to ``target``.

    FIX: the signature previously declared ``__UpperCamelCase`` three times
    (a SyntaxError) and the distance-dict assignment targets had been lost;
    restored from the names the body reads (``graph``/``start``/``target``).

    Args:
        graph: adjacency-list mapping of node -> list of neighbour nodes.
        start: node to search from.
        target: node to reach.

    Returns:
        int: edge count of a shortest path, 0 when start == target, and -1
        when either node is absent or no path exists.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
| 28 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# NOTE(review): the module logger was bound to the throwaway name ``a__`` —
# presumably it was originally ``logger``.
a__ = logging.get_logger(__name__)
def _UpperCAmelCase ( config , base_model=False ):
    """Build the (timm_name, hf_name) renaming table for DeiT weight conversion.

    FIX: the signature previously declared ``a`` twice (a SyntaxError) and the
    ``rename_keys`` assignment targets had been lost.

    Args:
        config: model config; only ``num_hidden_layers`` is read here.
        base_model: when True, emit names for the headless base model (the
            "deit" prefix is stripped and pooler keys are added); otherwise
            include the classification heads.

    Returns:
        List of (old_name, new_name) tuples.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ])
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ])
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ])
    return rename_keys
def _UpperCAmelCase ( state_dict , config , base_model=False ):
    """Split each fused timm qkv projection into separate HF q/k/v entries, in place.

    FIX: the signature previously declared ``a`` three times (a SyntaxError)
    and the ``state_dict[...]`` assignment targets had been lost; restored to
    the standard HF ViT/DeiT key layout.

    Args:
        state_dict: mutable dict of parameter tensors; the fused
            ``blocks.{i}.attn.qkv.*`` entries are popped and replaced.
        config: read for ``num_hidden_layers`` and ``hidden_size``.
        base_model: when True, the target keys carry no "deit." prefix.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def _UpperCAmelCase ( dct , old , new ):
    """Rename an entry of ``dct`` in place: move ``dct[old]`` to ``dct[new]``.

    FIX: the signature previously declared ``a`` three times (a SyntaxError)
    and the intermediate's name had been lost.
    """
    val = dct.pop(old)
    dct[new] = val
def _UpperCAmelCase ( ):
    """Download the standard COCO cats image used to sanity-check conversions.

    FIX: the request previously read the undefined name ``a`` for both the
    URL and the ``stream`` flag; restored to the bound URL and ``stream=True``.

    Returns:
        PIL.Image.Image: the fetched image (requires network access).
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def _UpperCAmelCase ( a : List[str] , a : Tuple ):
    """Convert a timm DeiT checkpoint into the HF format, verify logits, save.

    NOTE(review): unrunnable as written — the signature declares ``a`` twice
    (a SyntaxError; the original took ``(deit_name, pytorch_dump_folder_path)``),
    most assignment targets below were collapsed onto ``snake_case__`` while
    later statements read ``config``/``idalabel``/``deit_name``/``timm_model``/
    ``rename_keys``/``model``/``image_processor``/``encoding``/``outputs``/
    ``timm_logits`` etc., and the helpers ``create_rename_keys``/``rename_key``/
    ``read_in_q_k_v`` were all renamed ``_UpperCAmelCase`` above. Restore the
    original names before running.
    """
    snake_case__ = DeiTConfig()
    # all deit models have fine-tuned heads
    snake_case__ = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    snake_case__ = 1000
    snake_case__ = """huggingface/label-files"""
    snake_case__ = """imagenet-1k-id2label.json"""
    snake_case__ = json.load(open(hf_hub_download(a , a , repo_type="""dataset""" ) , """r""" ) )
    snake_case__ = {int(a ): v for k, v in idalabel.items()}
    snake_case__ = idalabel
    snake_case__ = {v: k for k, v in idalabel.items()}
    snake_case__ = int(deit_name[-6:-4] )
    snake_case__ = int(deit_name[-3:] )
    # size of the architecture
    if deit_name[9:].startswith("""tiny""" ):
        snake_case__ = 192
        snake_case__ = 768
        snake_case__ = 12
        snake_case__ = 3
    elif deit_name[9:].startswith("""small""" ):
        snake_case__ = 384
        snake_case__ = 1536
        snake_case__ = 12
        snake_case__ = 6
    if deit_name[9:].startswith("""base""" ):
        pass
    elif deit_name[4:].startswith("""large""" ):
        snake_case__ = 1024
        snake_case__ = 4096
        snake_case__ = 24
        snake_case__ = 16
    # load original model from timm
    snake_case__ = timm.create_model(a , pretrained=a )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    snake_case__ = timm_model.state_dict()
    snake_case__ = create_rename_keys(a , a )
    for src, dest in rename_keys:
        rename_key(a , a , a )
    read_in_q_k_v(a , a , a )
    # load HuggingFace model
    snake_case__ = DeiTForImageClassificationWithTeacher(a ).eval()
    model.load_state_dict(a )
    # Check outputs on an image, prepared by DeiTImageProcessor
    snake_case__ = int(
        (256 / 224) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    snake_case__ = DeiTImageProcessor(size=a , crop_size=config.image_size )
    snake_case__ = image_processor(images=prepare_img() , return_tensors="""pt""" )
    snake_case__ = encoding["""pixel_values"""]
    snake_case__ = model(a )
    snake_case__ = timm_model(a )
    # The converted model must reproduce the timm logits.
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(a , outputs.logits , atol=1e-3 )
    Path(a ).mkdir(exist_ok=a )
    print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(a )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(a )
if __name__ == "__main__":
    # NOTE(review): the parser is bound to ``a__`` but the add_argument calls
    # read ``parser``; likewise ``args`` and ``convert_deit_checkpoint`` are
    # never defined (the conversion entry point above was renamed
    # ``_UpperCAmelCase``). Restore the original names before running.
    a__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--deit_name""",
        default="""vit_deit_base_distilled_patch16_224""",
        type=str,
        help="""Name of the DeiT timm model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    a__ = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 654 | 0 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
# NOTE(review): three distinct module constants (originally _CITATION,
# _DESCRIPTION and _KWARGS_DESCRIPTION) were collapsed onto the single name
# ``A_``, so only the last binding survives and the decorator below reads
# undefined names. Restore the original constant names.
A_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
A_ = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
A_ = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
    """Word Error Rate (WER) metric backed by ``jiwer.compute_measures``.

    NOTE(review): both methods share the name ``UpperCAmelCase__`` so the
    second shadows the first; the datasets.Metric API expects them to be
    ``_info`` and ``_compute`` — confirm and restore upstream names.
    """

    def UpperCAmelCase__ ( self ):
        """Return the metric metadata (features, citation, reference URLs)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ),
            codebase_urls=['''https://github.com/jitsi/jiwer/'''],
            reference_urls=[
                '''https://en.wikipedia.org/wiki/Word_error_rate''',
            ],
        )

    def UpperCAmelCase__ ( self , predictions=None , references=None , concatenate_texts=False ):
        """Compute the corpus-level word error rate.

        FIX: the signature previously declared ``UpperCAmelCase`` three times
        (a SyntaxError); parameter names restored from the documented API.

        Args:
            predictions: transcriptions to score.
            references: ground-truth transcripts, index-aligned with predictions.
            concatenate_texts: if True, score everything as one concatenated pair.

        Returns:
            float: the word error rate (0.0 is a perfect score).
        """
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                # Accumulate raw error and reference-token counts so the final
                # WER is corpus-level rather than an average of sentence rates.
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 29 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( lowercase_ ):
    """Output container for the prior transformer.

    NOTE(review): the base class ``lowercase_`` is undefined in this module —
    presumably ``BaseOutput`` (imported above) — and the field name was lost;
    the forward pass below constructs ``PriorTransformerOutput(
    predicted_image_embedding=...)``, so this field was presumably
    ``predicted_image_embedding``. Confirm against upstream.
    """

    # Predicted CLIP image embedding tensor.
    _lowercase : torch.FloatTensor
class _lowerCAmelCase ( lowercase_ , lowercase_ ):
    """Diffusion prior transformer: maps a timestep, conditioning embeddings and
    a noisy latent to a predicted CLIP image embedding.

    NOTE(review): unrunnable as written — the base classes ``lowercase_`` are
    undefined (presumably ``ModelMixin``/``ConfigMixin``), every parameter of
    ``__init__`` and the forward method shares the name ``UpperCamelCase__``
    (a SyntaxError), all methods share the name ``__magic_name__`` (so only
    the last survives), and most assignment targets were collapsed onto
    ``snake_case__`` while later statements read the original attribute and
    local names (``self.time_proj``, ``timesteps``, ``hidden_states``, ...).
    Restore the upstream names before executing.
    """

    @register_to_config
    def __init__( self : Tuple , UpperCamelCase__ : int = 3_2 , UpperCamelCase__ : int = 6_4 , UpperCamelCase__ : int = 2_0 , UpperCamelCase__ : int = 7_6_8 , UpperCamelCase__ : Optional[Any]=7_7 , UpperCamelCase__ : str=4 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : str = "silu" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = "linear" , UpperCamelCase__ : Optional[str] = "prd" , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , ):
        """Build the timestep/conditioning embeddings, transformer blocks,
        causal mask and output projection."""
        super().__init__()
        snake_case__ = num_attention_heads
        snake_case__ = attention_head_dim
        snake_case__ = num_attention_heads * attention_head_dim
        snake_case__ = additional_embeddings
        snake_case__ = time_embed_dim or inner_dim
        snake_case__ = embedding_proj_dim or embedding_dim
        snake_case__ = clip_embed_dim or embedding_dim
        snake_case__ = Timesteps(UpperCamelCase__ , UpperCamelCase__ , 0)
        snake_case__ = TimestepEmbedding(UpperCamelCase__ , UpperCamelCase__ , out_dim=UpperCamelCase__ , act_fn=UpperCamelCase__)
        snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        if embedding_proj_norm_type is None:
            snake_case__ = None
        elif embedding_proj_norm_type == "layer":
            snake_case__ = nn.LayerNorm(UpperCamelCase__)
        else:
            raise ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''')
        snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        if encoder_hid_proj_type is None:
            snake_case__ = None
        elif encoder_hid_proj_type == "linear":
            snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        else:
            raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''')
        # Learned positional embedding over all tokens (incl. extra ones).
        snake_case__ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase__))
        if added_emb_type == "prd":
            snake_case__ = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase__))
        elif added_emb_type is None:
            snake_case__ = None
        else:
            raise ValueError(
                F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''')
        snake_case__ = nn.ModuleList(
            [
                BasicTransformerBlock(
                    UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn="""gelu""" , attention_bias=UpperCamelCase__ , )
                for d in range(UpperCamelCase__)
            ])
        if norm_in_type == "layer":
            snake_case__ = nn.LayerNorm(UpperCamelCase__)
        elif norm_in_type is None:
            snake_case__ = None
        else:
            raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''')
        snake_case__ = nn.LayerNorm(UpperCamelCase__)
        snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        # Additive causal mask: -10000.0 above the diagonal blocks attention
        # to future positions.
        snake_case__ = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0)
        causal_attention_mask.triu_(1)
        snake_case__ = causal_attention_mask[None, ...]
        self.register_buffer("""causal_attention_mask""" , UpperCamelCase__ , persistent=UpperCamelCase__)
        snake_case__ = nn.Parameter(torch.zeros(1 , UpperCamelCase__))
        snake_case__ = nn.Parameter(torch.zeros(1 , UpperCamelCase__))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def __magic_name__ ( self : Optional[int]):
        """Return a dict mapping each attention-processor path to its processor."""
        snake_case__ = {}

        def fn_recursive_add_processors(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Dict[str, AttentionProcessor]):
            # Walk the module tree collecting anything exposing set_processor.
            if hasattr(UpperCamelCase__ , """set_processor"""):
                snake_case__ = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(F'''{name}.{sub_name}''' , UpperCamelCase__ , UpperCamelCase__)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        return processors

    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        """Set the attention processor(s): one shared instance or a per-layer dict."""
        snake_case__ = len(self.attn_processors.keys())
        # A per-layer dict must cover every attention layer exactly.
        if isinstance(UpperCamelCase__ , UpperCamelCase__) and len(UpperCamelCase__) != count:
            raise ValueError(
                F'''A dict of processors was passed, but the number of processors {len(UpperCamelCase__)} does not match the'''
                F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''')

        def fn_recursive_attn_processor(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Optional[int]):
            if hasattr(UpperCamelCase__ , """set_processor"""):
                if not isinstance(UpperCamelCase__ , UpperCamelCase__):
                    module.set_processor(UpperCamelCase__)
                else:
                    module.set_processor(processor.pop(F'''{name}.processor'''))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(F'''{name}.{sub_name}''' , UpperCamelCase__ , UpperCamelCase__)

        for name, module in self.named_children():
            fn_recursive_attn_processor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)

    def __magic_name__ ( self : Dict):
        """Reset every attention layer to the default AttnProcessor."""
        self.set_attn_processor(AttnProcessor())

    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[torch.Tensor, float, int] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.BoolTensor] = None , UpperCamelCase__ : bool = True , ):
        """Forward pass: embed the timestep and conditioning, assemble the token
        sequence, run the transformer stack under the causal mask and project
        to the CLIP embedding dimension."""
        snake_case__ = hidden_states.shape[0]
        snake_case__ = timestep
        if not torch.is_tensor(UpperCamelCase__):
            snake_case__ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device)
        elif torch.is_tensor(UpperCamelCase__) and len(timesteps.shape) == 0:
            snake_case__ = timesteps[None].to(hidden_states.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        snake_case__ = timesteps * torch.ones(UpperCamelCase__ , dtype=timesteps.dtype , device=timesteps.device)
        snake_case__ = self.time_proj(UpperCamelCase__)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        snake_case__ = timesteps_projected.to(dtype=self.dtype)
        snake_case__ = self.time_embedding(UpperCamelCase__)
        if self.embedding_proj_norm is not None:
            snake_case__ = self.embedding_proj_norm(UpperCamelCase__)
        snake_case__ = self.embedding_proj(UpperCamelCase__)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            snake_case__ = self.encoder_hidden_states_proj(UpperCamelCase__)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""")
        snake_case__ = self.proj_in(UpperCamelCase__)
        snake_case__ = self.positional_embedding.to(hidden_states.dtype)
        snake_case__ = []
        snake_case__ = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(UpperCamelCase__)
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape) == 2:
            snake_case__ = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            snake_case__ = hidden_states[:, None, :]
        snake_case__ = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            snake_case__ = self.prd_embedding.to(hidden_states.dtype).expand(UpperCamelCase__ , -1 , -1)
            additional_embeds.append(UpperCamelCase__)
        snake_case__ = torch.cat(
            UpperCamelCase__ , dim=1 , )
        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        snake_case__ = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            snake_case__ = F.pad(
                UpperCamelCase__ , (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ) , value=0.0 , )
        snake_case__ = hidden_states + positional_embeddings
        if attention_mask is not None:
            # Convert the boolean mask to additive form and merge with the
            # causal mask before repeating across attention heads.
            snake_case__ = (1 - attention_mask.to(hidden_states.dtype)) * -1_00_00.0
            snake_case__ = F.pad(UpperCamelCase__ , (0, self.additional_embeddings) , value=0.0)
            snake_case__ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            snake_case__ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0)
        if self.norm_in is not None:
            snake_case__ = self.norm_in(UpperCamelCase__)
        for block in self.transformer_blocks:
            snake_case__ = block(UpperCamelCase__ , attention_mask=UpperCamelCase__)
        snake_case__ = self.norm_out(UpperCamelCase__)
        if self.prd_embedding is not None:
            # With a prd token, the prediction is read from the final token.
            snake_case__ = hidden_states[:, -1]
        else:
            snake_case__ = hidden_states[:, additional_embeddings_len:]
        snake_case__ = self.proj_to_clip_embeddings(UpperCamelCase__)
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase__)

    def __magic_name__ ( self : Any , UpperCamelCase__ : Any):
        """Un-normalize prior latents back to the CLIP embedding statistics."""
        snake_case__ = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 654 | 0 |
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def lowerCamelCase__ ( repo_id , path , revision = None ):
    '''Return the canonical hf.co dataset URL for ``path`` inside ``repo_id``.

    FIX: the signature previously declared ``_lowercase`` three times (a
    SyntaxError); parameters restored as (repo_id, path, revision).

    Args:
        repo_id: dataset repository id on the Hub.
        path: file path inside the repository.
        revision: optional git revision (branch, tag or commit).
    '''
    if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release:
        # old versions of hfh don't url-encode the file path
        path = quote(path )
    return hfh.hf_hub_url(repo_id , path , repo_type='''dataset''' , revision=revision )
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
# NOTE(review): two distinct constants were collapsed onto ``a__``; the test
# class below reads ``TOKENIZER_CHECKPOINTS``, which is therefore undefined.
# Restore the original constant names (checkpoint list + model name).
a__ = ["""gpt2"""]
a__ = """gpt2"""
if is_tf_available():

    class _lowerCAmelCase ( tf.Module ):
        """tf.Module pairing an in-graph GPT-2 tokenizer with a TF LM head for serving.

        NOTE(review): assignments below bind the throwaway local
        ``snake_case__`` while later statements read ``tokenizer``/
        ``tokenized``/``input_ids_dense``/``input_mask``/``outputs`` — the
        original attribute and variable names were lost (e.g.
        ``self.tokenizer = tokenizer``). Restore them before using.
        """

        def __init__( self : List[Any] , UpperCamelCase__ : int):
            """Store the tokenizer and build a fresh TF GPT-2 LM head from config."""
            super().__init__()
            snake_case__ = tokenizer
            snake_case__ = AutoConfig.from_pretrained(UpperCamelCase__)
            snake_case__ = TFGPTaLMHeadModel.from_config(UpperCamelCase__)

        @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text"""),))
        def __magic_name__ ( self : Tuple , UpperCamelCase__ : int):
            """Serving signature: tokenize raw strings and return the LM logits."""
            snake_case__ = self.tokenizer(UpperCamelCase__)
            snake_case__ = tokenized["""input_ids"""].to_tensor()
            snake_case__ = tf.cast(input_ids_dense > 0 , tf.intaa)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            snake_case__ = self.model(input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__)["""logits"""]
            return outputs
@require_tf
@require_keras_nlp
class _lowerCAmelCase ( unittest.TestCase ):
    """Equivalence / export tests for the in-graph TF GPT-2 tokenizer.

    NOTE(review): every test method below was mangled to ``__magic_name__``,
    so in a real class each definition would clobber the previous one; the
    intermediate results are likewise bound to ``snake_case__`` while later
    lines read descriptive names (``python_outputs``, ``tf_outputs``, ...).
    Both are name-mangling damage.  TODO confirm against the original file.
    """
    def __magic_name__ ( self : List[Any]):
        """setUp: load paired Python/TF tokenizers and build test sentences."""
        super().setUp()
        snake_case__ = [GPTaTokenizer.from_pretrained(UpperCamelCase__) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        snake_case__ = [TFGPTaTokenizer.from_pretrained(UpperCamelCase__) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        # Sentences chosen to exercise plain ASCII, control characters,
        # CJK text, accented Latin, and rare Unicode codepoints.
        snake_case__ = [
            """This is a straightforward English test sentence.""",
            """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
            """Now we're going to add some Chinese: 一 二 三 一二三""",
            """And some much more rare Chinese: 齉 堃 齉堃""",
            """Je vais aussi écrire en français pour tester les accents""",
            """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
        ]
        snake_case__ = list(zip(self.test_sentences , self.test_sentences[::-1]))

    def __magic_name__ ( self : Optional[int]):
        """Check the TF tokenizer matches the Python tokenizer per key."""
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                snake_case__ = tokenizer([test_inputs] , return_tensors="""tf""")
                snake_case__ = tf_tokenizer([test_inputs])
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    snake_case__ = python_outputs[key].numpy()
                    snake_case__ = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(UpperCamelCase__ , tf.intaa) == tf_outputs_values))

    @slow
    def __magic_name__ ( self : Optional[int]):
        """Check tf.function-compiled output equals eager output."""
        for tf_tokenizer in self.tf_tokenizers:
            snake_case__ = tf.function(UpperCamelCase__)
            for test_inputs in self.test_sentences:
                snake_case__ = tf.constant(UpperCamelCase__)
                snake_case__ = compiled_tokenizer(UpperCamelCase__)
                snake_case__ = tf_tokenizer(UpperCamelCase__)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def __magic_name__ ( self : Optional[Any]):
        """Round-trip through tf.saved_model and compare serving outputs."""
        for tf_tokenizer in self.tf_tokenizers:
            snake_case__ = ModelToSave(tokenizer=UpperCamelCase__)
            snake_case__ = tf.convert_to_tensor([self.test_sentences[0]])
            snake_case__ = model.serving(UpperCamelCase__) # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                snake_case__ = Path(UpperCamelCase__) / """saved.model"""
                tf.saved_model.save(UpperCamelCase__ , UpperCamelCase__ , signatures={"""serving_default""": model.serving})
                snake_case__ = tf.saved_model.load(UpperCamelCase__)
                snake_case__ = loaded_model.signatures["""serving_default"""](UpperCamelCase__)["""output_0"""]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def __magic_name__ ( self : Tuple):
        """Round-trip through get_config/from_config and compare outputs."""
        for tf_tokenizer in self.tf_tokenizers:
            snake_case__ = tf.convert_to_tensor([self.test_sentences[0]])
            snake_case__ = tf_tokenizer(UpperCamelCase__) # Build model with some sample inputs
            snake_case__ = tf_tokenizer.get_config()
            snake_case__ = TFGPTaTokenizer.from_config(UpperCamelCase__)
            snake_case__ = model_from_config(UpperCamelCase__)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def __magic_name__ ( self : Dict):
        """Check truncation: output length equals the requested max_length."""
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            snake_case__ = 1_2_3_1_2_3
            for max_length in [3, 5, 1_0_2_4]:
                snake_case__ = tf.convert_to_tensor([self.test_sentences[0]])
                snake_case__ = tf_tokenizer(UpperCamelCase__ , max_length=UpperCamelCase__)
                snake_case__ = out["""input_ids"""].numpy().shape[1]
                assert out_length == max_length
| 654 | 0 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class lowerCamelCase_ :
    """Test helper that builds a tiny CTRL config plus random inputs and
    checks the shapes/outputs of the CTRL model family (standard
    ``*ModelTester`` pattern of the transformers test suite).

    NOTE(review): heavy name-mangling damage throughout this class —
    ``__init__`` declares the same parameter name ``_lowerCAmelCase`` ~23
    times (a SyntaxError), every attribute assignment binds to the local
    ``SCREAMING_SNAKE_CASE_`` instead of ``self.<attr>`` (the right-hand
    sides reveal the intended attribute names), and all methods share the
    mangled name ``lowerCAmelCase_``, so later definitions would clobber
    earlier ones.  TODO confirm against the original test file.
    """
    def __init__( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any]=14 , _lowerCAmelCase : int=7 , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Any=True , _lowerCAmelCase : List[Any]=99 , _lowerCAmelCase : Dict=32 , _lowerCAmelCase : Optional[int]=5 , _lowerCAmelCase : int=4 , _lowerCAmelCase : Union[str, Any]=37 , _lowerCAmelCase : List[str]="gelu" , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : Tuple=512 , _lowerCAmelCase : Dict=16 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : Tuple=0.02 , _lowerCAmelCase : List[Any]=3 , _lowerCAmelCase : Union[str, Any]=4 , _lowerCAmelCase : List[str]=None , ):
        # Store every hyper-parameter of the tiny model configuration.
        SCREAMING_SNAKE_CASE_ = parent
        SCREAMING_SNAKE_CASE_ = batch_size
        SCREAMING_SNAKE_CASE_ = seq_length
        SCREAMING_SNAKE_CASE_ = is_training
        SCREAMING_SNAKE_CASE_ = use_token_type_ids
        SCREAMING_SNAKE_CASE_ = use_input_mask
        SCREAMING_SNAKE_CASE_ = use_labels
        SCREAMING_SNAKE_CASE_ = use_mc_token_ids
        SCREAMING_SNAKE_CASE_ = vocab_size
        SCREAMING_SNAKE_CASE_ = hidden_size
        SCREAMING_SNAKE_CASE_ = num_hidden_layers
        SCREAMING_SNAKE_CASE_ = num_attention_heads
        SCREAMING_SNAKE_CASE_ = intermediate_size
        SCREAMING_SNAKE_CASE_ = hidden_act
        SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
        SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE_ = max_position_embeddings
        SCREAMING_SNAKE_CASE_ = type_vocab_size
        SCREAMING_SNAKE_CASE_ = type_sequence_label_size
        SCREAMING_SNAKE_CASE_ = initializer_range
        SCREAMING_SNAKE_CASE_ = num_labels
        SCREAMING_SNAKE_CASE_ = num_choices
        SCREAMING_SNAKE_CASE_ = scope
        # CTRL uses the last vocab id as padding in these tests.
        SCREAMING_SNAKE_CASE_ = self.vocab_size - 1

    def lowerCAmelCase_ ( self : Any ):
        """Build random ids/masks/labels plus a config for one test case."""
        SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE_ = None
        if self.use_input_mask:
            SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] )
        SCREAMING_SNAKE_CASE_ = None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        SCREAMING_SNAKE_CASE_ = None
        if self.use_mc_token_ids:
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
        SCREAMING_SNAKE_CASE_ = None
        SCREAMING_SNAKE_CASE_ = None
        SCREAMING_SNAKE_CASE_ = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices )
        SCREAMING_SNAKE_CASE_ = self.get_config()
        SCREAMING_SNAKE_CASE_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def lowerCAmelCase_ ( self : Optional[int] ):
        """Return a CTRLConfig sized from the tester's hyper-parameters."""
        return CTRLConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )

    def lowerCAmelCase_ ( self : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , *_lowerCAmelCase : int ):
        """create_and_check for the base CTRLModel: verifies hidden-state
        shape and past_key_values length.  (Duplicate param names — see
        class NOTE.)"""
        SCREAMING_SNAKE_CASE_ = CTRLModel(config=_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , head_mask=_lowerCAmelCase )
        model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )

    def lowerCAmelCase_ ( self : str , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict , *_lowerCAmelCase : Union[str, Any] ):
        """create_and_check for CTRLLMHeadModel: loss is scalar, logits are
        (batch, seq, vocab)."""
        SCREAMING_SNAKE_CASE_ = CTRLLMHeadModel(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def lowerCAmelCase_ ( self : List[str] ):
        """prepare_config_and_inputs_for_common: unpack the 9-tuple and build
        the kwargs dict used by the shared mixin tests."""
        SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
        (
            (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) ,
        ) = config_and_inputs
        SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
        return config, inputs_dict

    def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , *_lowerCAmelCase : List[str] ):
        """create_and_check for CTRLForSequenceClassification: logits are
        (batch, num_labels)."""
        SCREAMING_SNAKE_CASE_ = self.num_labels
        SCREAMING_SNAKE_CASE_ = CTRLForSequenceClassification(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Common-suite test class wiring CTRL models into the shared
    Model/Generation/Pipeline tester mixins.

    NOTE(review): the three mixin base classes were all mangled to
    ``_SCREAMING_SNAKE_CASE`` (undefined here; presumably ModelTesterMixin,
    GenerationTesterMixin, PipelineTesterMixin from the imports above), the
    class attributes were all mangled to ``lowercase_`` (each assignment
    clobbers the previous), and methods share the name ``lowerCAmelCase_``.
    TODO confirm against the original test file.
    """
    lowercase_ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    lowercase_ = (CTRLLMHeadModel,) if is_torch_available() else ()
    lowercase_ = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowercase_ = True
    lowercase_ = False
    lowercase_ = False

    def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] ):
        """Return True for pipeline test cases that are known to fail.
        (Duplicate parameter names and the undefined
        ``pipeline_test_casse_name`` are mangling damage — see class NOTE.)"""
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True
        return False

    def lowerCAmelCase_ ( self : Tuple ):
        """setUp: build the model tester and config tester."""
        SCREAMING_SNAKE_CASE_ = CTRLModelTester(self )
        SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_lowerCAmelCase , n_embd=37 )

    def lowerCAmelCase_ ( self : int ):
        """tearDown: release as much GPU memory as possible."""
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCAmelCase_ ( self : Optional[Any] ):
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()

    def lowerCAmelCase_ ( self : List[str] ):
        """Shape-check the base CTRLModel."""
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*_lowerCAmelCase )

    def lowerCAmelCase_ ( self : List[str] ):
        """Shape-check the CTRL LM head model."""
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*_lowerCAmelCase )

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def lowerCAmelCase_ ( self : int ):
        pass

    @slow
    def lowerCAmelCase_ ( self : str ):
        """Smoke-test loading the first pretrained CTRL checkpoint."""
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE_ = CTRLModel.from_pretrained(_lowerCAmelCase )
            self.assertIsNotNone(_lowerCAmelCase )

    @unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
    def lowerCAmelCase_ ( self : int ):
        pass
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow integration test: greedy generation from the pretrained ``ctrl``
    checkpoint must reproduce a known token sequence.

    BUG FIX: the last line of this class carried markdown-table residue
    (`` | 31 |``) that made the whole module a SyntaxError; it is removed
    here.  NOTE(review): this class re-uses the mangled name
    ``lowerCamelCase_`` (clobbering the test class above) and the method
    bodies read the undefined module-level name ``_lowerCAmelCase``
    (presumably ``torch_device``) — mangling damage, TODO confirm.
    """
    def lowerCAmelCase_ ( self : str ):
        """tearDown: release as much GPU memory as possible."""
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def lowerCAmelCase_ ( self : int ):
        """Generate from the prompt 'Legal the president is' and compare
        against the expected greedy continuation token ids."""
        SCREAMING_SNAKE_CASE_ = CTRLLMHeadModel.from_pretrained('ctrl' )
        model.to(_lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ = torch.tensor(
            [[11_859, 0, 1_611, 8]] , dtype=torch.long , device=_lowerCAmelCase ) # Legal the president is
        SCREAMING_SNAKE_CASE_ = [
            11_859,
            0,
            1_611,
            8,
            5,
            150,
            26_449,
            2,
            19,
            348,
            469,
            3,
            2_595,
            48,
            20_740,
            246_533,
            246_533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        SCREAMING_SNAKE_CASE_ = model.generate(_lowerCAmelCase , do_sample=_lowerCAmelCase )
        self.assertListEqual(output_ids[0].tolist() , _lowerCAmelCase )
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( lowercase_ ):
    """Scheduler tests for IPNDMScheduler (save/load round-trips, step output
    shapes, and a deterministic full sampling loop).

    NOTE(review): the base class was mangled to ``lowercase_`` (presumably
    SchedulerCommonTest from the import above), both class attributes were
    mangled to ``_lowercase`` (the second clobbers the first; originally
    ``scheduler_classes`` and ``forward_default_kwargs``), all methods share
    the name ``__magic_name__``, several signatures repeat the same
    parameter name (a SyntaxError), and locals are bound to ``snake_case__``
    while later lines read descriptive names.  TODO confirm against the
    original test file.
    """
    _lowercase : int = (IPNDMScheduler,)
    _lowercase : int = (('''num_inference_steps''', 50),)

    def __magic_name__ ( self : Any , **UpperCamelCase__ : Tuple):
        """Return a scheduler config dict, updated with any overrides."""
        snake_case__ = {"""num_train_timesteps""": 1_0_0_0}
        config.update(**UpperCamelCase__)
        return config

    def __magic_name__ ( self : int , UpperCamelCase__ : Dict=0 , **UpperCamelCase__ : int):
        """check_over_configs: save/reload the scheduler config and verify
        step outputs match the original scheduler."""
        snake_case__ = dict(self.forward_default_kwargs)
        snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
        snake_case__ = self.dummy_sample
        snake_case__ = 0.1 * sample
        snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config(**UpperCamelCase__)
            snake_case__ = scheduler_class(**UpperCamelCase__)
            scheduler.set_timesteps(UpperCamelCase__)
            # copy over dummy past residuals
            snake_case__ = dummy_past_residuals[:]
            if time_step is None:
                snake_case__ = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(UpperCamelCase__)
                snake_case__ = scheduler_class.from_pretrained(UpperCamelCase__)
                new_scheduler.set_timesteps(UpperCamelCase__)
                # copy over dummy past residuals
                snake_case__ = dummy_past_residuals[:]
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def __magic_name__ ( self : List[Any]):
        """Intentionally disabled base-class check (no-op)."""
        pass

    def __magic_name__ ( self : Tuple , UpperCamelCase__ : Union[str, Any]=0 , **UpperCamelCase__ : Tuple):
        """check_over_forward: like check_over_configs but with default
        config and residuals copied after set_timesteps."""
        snake_case__ = dict(self.forward_default_kwargs)
        snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
        snake_case__ = self.dummy_sample
        snake_case__ = 0.1 * sample
        snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config()
            snake_case__ = scheduler_class(**UpperCamelCase__)
            scheduler.set_timesteps(UpperCamelCase__)
            # copy over dummy past residuals (must be after setting timesteps)
            snake_case__ = dummy_past_residuals[:]
            if time_step is None:
                snake_case__ = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(UpperCamelCase__)
                snake_case__ = scheduler_class.from_pretrained(UpperCamelCase__)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(UpperCamelCase__)
                # copy over dummy past residual (must be after setting timesteps)
                snake_case__ = dummy_past_residuals[:]
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def __magic_name__ ( self : Union[str, Any] , **UpperCamelCase__ : Dict):
        """full_loop: run ten denoising steps twice over the deterministic
        dummy sample and return the final sample."""
        snake_case__ = self.scheduler_classes[0]
        snake_case__ = self.get_scheduler_config(**UpperCamelCase__)
        snake_case__ = scheduler_class(**UpperCamelCase__)
        snake_case__ = 1_0
        snake_case__ = self.dummy_model()
        snake_case__ = self.dummy_sample_deter
        scheduler.set_timesteps(UpperCamelCase__)
        for i, t in enumerate(scheduler.timesteps):
            snake_case__ = model(UpperCamelCase__ , UpperCamelCase__)
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__).prev_sample
        for i, t in enumerate(scheduler.timesteps):
            snake_case__ = model(UpperCamelCase__ , UpperCamelCase__)
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__).prev_sample
        return sample

    def __magic_name__ ( self : Optional[int]):
        """Check step() output shapes at two adjacent timesteps."""
        snake_case__ = dict(self.forward_default_kwargs)
        snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config()
            snake_case__ = scheduler_class(**UpperCamelCase__)
            snake_case__ = self.dummy_sample
            snake_case__ = 0.1 * sample
            if num_inference_steps is not None and hasattr(UpperCamelCase__ , """set_timesteps"""):
                scheduler.set_timesteps(UpperCamelCase__)
            elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , """set_timesteps"""):
                snake_case__ = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            snake_case__ = dummy_past_residuals[:]
            snake_case__ = scheduler.timesteps[5]
            snake_case__ = scheduler.timesteps[6]
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            self.assertEqual(output_a.shape , sample.shape)
            self.assertEqual(output_a.shape , output_a.shape)
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            self.assertEqual(output_a.shape , sample.shape)
            self.assertEqual(output_a.shape , output_a.shape)

    def __magic_name__ ( self : Union[str, Any]):
        """Exercise check_over_configs for several training lengths."""
        for timesteps in [1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__)

    def __magic_name__ ( self : Dict):
        """Exercise check_over_forward for several inference lengths."""
        for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0]):
            self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__)

    def __magic_name__ ( self : List[str]):
        """Regression check on the mean magnitude of a full loop's output."""
        snake_case__ = self.full_loop()
        snake_case__ = torch.mean(torch.abs(UpperCamelCase__))
        assert abs(result_mean.item() - 2_5_4_0_5_2_9) < 1_0
| 654 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCAmelCase_ = logging.getLogger(__name__)
@dataclass
class __UpperCamelCase :
    """Arguments for selecting the pretrained model/config/tokenizer to
    fine-tune (the ``ModelArguments`` of the token-classification example).

    NOTE(review): every field was mangled to the same name ``__A`` (each
    annotation clobbers the previous — originally model_name_or_path,
    config_name, task_type, tokenizer_name, use_fast, cache_dir), and the
    bases/defaults were mangled to ``A__``, a name only defined later in the
    module, so the class body would raise NameError at import time.  TODO
    confirm against the original script.
    """
    # model_name_or_path (required)
    __A : str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    # config_name (default presumably None — mangled to A__)
    __A : Optional[str] = field(
        default=A__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    # task_type
    __A : Optional[str] = field(
        default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
    # tokenizer_name (default presumably None — mangled to A__)
    __A : Optional[str] = field(
        default=A__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    # use_fast (default presumably False — mangled to A__)
    __A : bool = field(default=A__ , metadata={"""help""": """Set this flag to use fast tokenization."""} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    # cache_dir (default presumably None — mangled to A__)
    __A : Optional[str] = field(
        default=A__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __UpperCamelCase :
    """Arguments describing the input data for training/evaluation (the
    ``DataTrainingArguments`` of the token-classification example).

    NOTE(review): this re-uses the mangled class name ``__UpperCamelCase``
    (clobbering the dataclass above), all fields were mangled to ``__A``
    (originally data_dir, labels, max_seq_length, overwrite_cache), and
    ``default=A__`` references a name defined only later in the module.
    TODO confirm against the original script.
    """
    # data_dir (required)
    __A : str = field(
        metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
    # labels (default presumably None — mangled to A__)
    __A : Optional[str] = field(
        default=A__ , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
    # max_seq_length
    __A : int = field(
        default=1_28 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    # overwrite_cache (default presumably False — mangled to A__)
    __A : bool = field(
        default=A__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def A__ ( ) -> List[str]:
    """Entry point of the NER fine-tuning example: parse arguments, set up
    logging, load model/tokenizer/datasets, then train, evaluate and predict
    as requested, returning the collected eval metrics.

    NOTE(review): heavy name-mangling damage — nearly every binding targets
    ``_UpperCAmelCase`` while later lines read the intended names
    (``parser``, ``training_args``, ``model_args``, ``config``, ``trainer``,
    ...), several call sites pass the undefined ``SCREAMING_SNAKE_CASE_``,
    and the nested ``align_predictions`` declares the same parameter name
    twice (a SyntaxError).  The structure mirrors the canonical
    ``run_ner.py`` example.  TODO confirm against the original script.
    """
    _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
    # Refuse to clobber an existing, non-empty output directory.
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            ''' --overwrite_output_dir to overcome.''' )
    # Resolve the task class (e.g. NER, POS) from the local ``tasks`` module.
    _UpperCAmelCase = import_module('''tasks''' )
    try:
        _UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , model_args.task_type )
        _UpperCAmelCase = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            F'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
            F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , SCREAMING_SNAKE_CASE_ )
    # Set seed
    set_seed(training_args.seed )
    # Prepare CONLL-2003 task
    _UpperCAmelCase = token_classification_task.get_labels(data_args.labels )
    _UpperCAmelCase = dict(enumerate(SCREAMING_SNAKE_CASE_ ) )
    _UpperCAmelCase = len(SCREAMING_SNAKE_CASE_ )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    _UpperCAmelCase = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=SCREAMING_SNAKE_CASE_ , idalabel=SCREAMING_SNAKE_CASE_ , labelaid={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE_ )} , cache_dir=model_args.cache_dir , )
    _UpperCAmelCase = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    _UpperCAmelCase = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , )
    # Get datasets
    _UpperCAmelCase = (
        TokenClassificationDataset(
            token_classification_task=SCREAMING_SNAKE_CASE_ , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    _UpperCAmelCase = (
        TokenClassificationDataset(
            token_classification_task=SCREAMING_SNAKE_CASE_ , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    def align_predictions(SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : np.ndarray ) -> Tuple[List[int], List[int]]:
        # Convert raw logits + label ids into parallel per-sentence label
        # string lists, skipping positions marked with the ignore index.
        # (Duplicate parameter names are mangling damage — see docstring.)
        _UpperCAmelCase = np.argmax(SCREAMING_SNAKE_CASE_ , axis=2 )
        _UpperCAmelCase , _UpperCAmelCase = preds.shape
        _UpperCAmelCase = [[] for _ in range(SCREAMING_SNAKE_CASE_ )]
        _UpperCAmelCase = [[] for _ in range(SCREAMING_SNAKE_CASE_ )]
        for i in range(SCREAMING_SNAKE_CASE_ ):
            for j in range(SCREAMING_SNAKE_CASE_ ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list

    def compute_metrics(SCREAMING_SNAKE_CASE_ : EvalPrediction ) -> Dict:
        # seqeval-based metrics over the aligned prediction/label lists.
        _UpperCAmelCase , _UpperCAmelCase = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ),
            "precision": precision_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ),
            "recall": recall_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ),
            "f1": fa_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ),
        }

    # Data collator
    _UpperCAmelCase = DataCollatorWithPadding(SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=8 ) if training_args.fpaa else None
    # Initialize our Trainer
    _UpperCAmelCase = Trainer(
        model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , compute_metrics=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    _UpperCAmelCase = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        _UpperCAmelCase = trainer.evaluate()
        _UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_process_zero():
            with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
                    writer.write('''%s = %s\n''' % (key, value) )
            results.update(SCREAMING_SNAKE_CASE_ )
    # Predict
    if training_args.do_predict:
        _UpperCAmelCase = TokenClassificationDataset(
            token_classification_task=SCREAMING_SNAKE_CASE_ , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = trainer.predict(SCREAMING_SNAKE_CASE_ )
        _UpperCAmelCase , _UpperCAmelCase = align_predictions(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        _UpperCAmelCase = os.path.join(training_args.output_dir , '''test_results.txt''' )
        if trainer.is_world_process_zero():
            with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as writer:
                for key, value in metrics.items():
                    logger.info(''' %s = %s''' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
                    writer.write('''%s = %s\n''' % (key, value) )
        # Save predictions
        _UpperCAmelCase = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
        if trainer.is_world_process_zero():
            with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as writer:
                with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
                    token_classification_task.write_predictions_to_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    return results
def A__ ( SCREAMING_SNAKE_CASE_ : Dict ) -> Any:
    """TPU/xla_spawn entry point: ignores its index argument and delegates
    to the main routine.

    NOTE(review): this re-uses the mangled name ``A__`` (clobbering the main
    function defined above) and calls ``main``, which is not defined at
    module level — both are name-mangling damage (originally ``_mp_fn``
    calling ``main``).  TODO confirm.
    """
    main()
if __name__ == "__main__":
    # BUG FIX: markdown-table residue (`` | 32 |``) trailing this call made
    # the module a SyntaxError; removed.  NOTE(review): ``main`` is not
    # defined in this module (the entry point above was mangled to ``A__``),
    # so as written this raises NameError — TODO confirm intended target.
    main()
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _lowerCAmelCase ( lowercase_ ):
    """Agent tool that produces a binary segmentation mask for an image given
    a text label, backed by the CLIPSeg model.

    NOTE(review): the base class was mangled to ``lowercase_`` (presumably
    PipelineTool from the import above), every class attribute was mangled to
    ``_lowercase`` (each clobbers the previous; originally description,
    default_checkpoint, name, model_class, inputs, outputs), and two method
    signatures repeat the parameter name ``UpperCamelCase__`` (a
    SyntaxError).  ``self.pre_processor`` / ``self.model`` are presumably
    provided by the base class — TODO confirm.
    """
    _lowercase : Optional[Any] = (
        '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
        '''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
    )
    _lowercase : Dict = '''CIDAS/clipseg-rd64-refined'''
    _lowercase : List[Any] = '''image_segmenter'''
    _lowercase : Tuple = CLIPSegForImageSegmentation
    _lowercase : str = ['''image''', '''text''']
    _lowercase : Dict = ['''image''']

    def __init__( self : Optional[int] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[Any]):
        """Require the vision backend (PIL) before the base class loads the
        model/processor.  (Duplicate *args/**kwargs names — see class NOTE.)"""
        requires_backends(self , ["""vision"""])
        super().__init__(*UpperCamelCase__ , **UpperCamelCase__)

    def __magic_name__ ( self : str , UpperCamelCase__ : "Image" , UpperCamelCase__ : str):
        """encode: run the CLIPSeg processor over one (image, label) pair.
        NOTE(review): reads ``label``/``image``, names not bound in this
        mangled signature — presumably the two parameters.  TODO confirm."""
        return self.pre_processor(text=[label] , images=[image] , padding=UpperCamelCase__ , return_tensors="""pt""")

    def __magic_name__ ( self : Any , UpperCamelCase__ : Optional[Any]):
        """forward: run the model without gradients and return its logits.
        NOTE(review): the local is bound to ``snake_case__`` but ``logits``
        is returned — mangling damage, presumably the same variable."""
        with torch.no_grad():
            snake_case__ = self.model(**UpperCamelCase__).logits
        return logits

    def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : Union[str, Any]):
        """decode: binarize the logits and convert to a PIL image mask.
        NOTE(review): locals bound to ``snake_case__`` while ``array`` is
        read — mangling damage; presumably thresholding the numpy array
        to 0/1 before scaling to 0-255.  TODO confirm."""
        snake_case__ = outputs.cpu().detach().numpy()
        snake_case__ = 0
        snake_case__ = 1
        return Image.fromarray((array * 2_5_5).astype(np.uinta))
| 654 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the WavLM model family: configuration is always
# available, modeling code only when torch is installed.
_import_structure = {"""configuration_wavlm""": ["""WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WavLMConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Register the torch-only symbols. (The original assigned this list to a
    # throwaway variable, so the lazy module never exposed the models.)
    _import_structure["""modeling_wavlm"""] = [
        """WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """WavLMForAudioFrameClassification""",
        """WavLMForCTC""",
        """WavLMForSequenceClassification""",
        """WavLMForXVector""",
        """WavLMModel""",
        """WavLMPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy. (The original bound the proxy to
    # a local name and referenced the undefined ``_import_structure``,
    # raising NameError on import.)
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=7 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Dict=1_8 , UpperCamelCase__ : Any=3_0 , UpperCamelCase__ : List[Any]=4_0_0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Any=None , UpperCamelCase__ : Optional[int]=True , ):
'''simple docstring'''
snake_case__ = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = num_channels
snake_case__ = image_size
snake_case__ = min_resolution
snake_case__ = max_resolution
snake_case__ = do_resize
snake_case__ = size
snake_case__ = apply_ocr
def __magic_name__ ( self : Optional[Any]):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCAmelCase ( lowercase_ , unittest.TestCase ):
    """Unit tests for ``LayoutLMvaImageProcessor``: property/config checks,
    batching over PIL / numpy / torch inputs, and an OCR integration test.

    NOTE(review): obfuscation broke several names here —
    ``LayoutLMvaImageProcessingTester`` is not defined in this file (the helper
    class above is also named ``_lowerCAmelCase``), results are bound to
    throwaway locals (``snake_case__``) while the assertions read
    ``UpperCamelCase__`` / ``image_processor`` / ``encoding`` /
    ``encoded_images``, and every test method shares the name
    ``__magic_name__`` so only the last definition survives on the class.
    Confirm intended names against the upstream test module before relying on
    this file.
    """
    _lowercase : str = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def __magic_name__ ( self : Optional[int]):
        '''Build the shared tester fixture used by the properties below.'''
        snake_case__ = LayoutLMvaImageProcessingTester(self)
    @property
    def __magic_name__ ( self : Tuple):
        '''Kwargs for instantiating the image processor under test.'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def __magic_name__ ( self : List[Any]):
        '''The processor exposes the expected configuration attributes.'''
        snake_case__ = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(UpperCamelCase__ , """do_resize"""))
        self.assertTrue(hasattr(UpperCamelCase__ , """size"""))
        self.assertTrue(hasattr(UpperCamelCase__ , """apply_ocr"""))
    def __magic_name__ ( self : Optional[int]):
        '''`from_dict` honors the default size and an integer `size` override.'''
        snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8})
        snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2)
        self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2})
    def __magic_name__ ( self : List[str]):
        '''Intentionally empty placeholder.'''
        pass
    def __magic_name__ ( self : List[str]):
        '''PIL inputs: single image and batch produce correctly shaped tensors,
        and OCR words/boxes are returned.'''
        snake_case__ = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , Image.Image)
        # Test not batched input
        snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""")
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        self.assertIsInstance(encoding.words , UpperCamelCase__)
        self.assertIsInstance(encoding.boxes , UpperCamelCase__)
        # Test batched
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
    def __magic_name__ ( self : List[Any]):
        '''Numpy inputs: single image and batch produce correctly shaped tensors.'''
        snake_case__ = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , np.ndarray)
        # Test not batched input
        snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
    def __magic_name__ ( self : Dict):
        '''Torch inputs: single image and batch produce correctly shaped tensors.'''
        snake_case__ = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , torch.Tensor)
        # Test not batched input
        snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
    def __magic_name__ ( self : Any):
        '''Integration: default processor resizes to 224x224 and, with OCR on,
        reproduces Tesseract 4.1.1 words/boxes; with apply_ocr=False only
        pixel values are returned.'''
        snake_case__ = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        snake_case__ = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""")
        snake_case__ = Image.open(ds[0]["""file"""]).convert("""RGB""")
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""")
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4))
        self.assertEqual(len(encoding.words) , len(encoding.boxes))
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        snake_case__ = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", 
        """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
        snake_case__ = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], 
        [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 
        8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , UpperCamelCase__)
        self.assertListEqual(encoding.boxes , UpperCamelCase__)
        # with apply_OCR = False
        snake_case__ = LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__)
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""")
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4))
"""simple docstring"""
from math import factorial
class snake_case_ :
    """A dual number ``real + d1*E + d2*E^2 + ...`` for forward-mode automatic
    differentiation: ``duals[k]`` holds the coefficient of ``E^(k+1)``.

    Fixes vs. the original: duplicate ``__init__`` parameter names
    (SyntaxError), degenerate ``isinstance(x, x)`` checks, constructor calls to
    the undefined name ``Dual``, rank padding with 1s (which corrupted
    higher-order derivatives), and the lost ``__radd__``/``__rmul__`` aliases.
    """

    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            # An int seeds `rank` first-order coefficients of 1.
            self.duals = [1] * rank
        else:
            # Otherwise `rank` is the coefficient list itself.
            self.duals = rank

    def __repr__(self):
        return (
            f'{self.real}+'
            f'{"+".join(str(dual) + "E" + str(n + 1) for n, dual in enumerate(self.duals))}'
        )

    def UpperCAmelCase__(self):
        """Return a copy with trailing zero coefficients stripped."""
        cur = self.duals.copy()
        while cur and cur[-1] == 0:  # guard against popping from an empty list
            cur.pop(-1)
        return snake_case_(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, snake_case_):
            # Scalar addition only shifts the real part.
            return snake_case_(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # Pad the shorter coefficient list with zeros so ranks line up.
        if len(s_dual) > len(o_dual):
            o_dual.extend([0] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([0] * (len(o_dual) - len(s_dual)))
        new_duals = [a + b for a, b in zip(s_dual, o_dual)]
        return snake_case_(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, snake_case_):
            # Scalar multiplication scales every coefficient.
            return snake_case_(self.real * other, [d * other for d in self.duals])
        # Polynomial product in E, index i+j+1 because each duals entry already
        # carries one implicit power of E.
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return snake_case_(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, snake_case_):
            return snake_case_(self.real / other, [d / other for d in self.duals])
        raise ValueError  # dual / dual is not supported

    def __floordiv__(self, other):
        if not isinstance(other, snake_case_):
            return snake_case_(self.real // other, [d // other for d in self.duals])
        raise ValueError  # dual // dual is not supported

    def __pow__(self, n):
        # Only non-negative integer powers are defined (repeated multiplication).
        if n < 0 or isinstance(n, float):
            raise ValueError('''power must be a positive integer''')
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


# Readable alias: the rest of this file refers to the class as ``Dual``.
Dual = snake_case_
def __snake_case ( func, position, order ):
    """Return the ``order``-th derivative of ``func`` at ``position`` using
    dual numbers.

    NOTE(review): the original declared three parameters all named
    ``_lowercase`` (a SyntaxError); names were restored from the error
    messages below. ``isinstance(order, order)`` was also repaired to an
    ``int`` check, and the undefined ``Dual`` to the class defined above.

    Raises:
        ValueError: if ``func`` is not callable, ``position`` is not numeric,
            or ``order`` is not an int.
    """
    if not callable(func):
        raise ValueError('''differentiate() requires a function as input for func''')
    if not isinstance(position, (float, int)):
        raise ValueError('''differentiate() requires a float as input for position''')
    if not isinstance(order, int):
        raise ValueError('''differentiate() requires an int as input for order''')
    # Seed a first-order dual at the evaluation point and propagate it.
    d = snake_case_(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    # duals[k] stores f^(k+1)(x) / (k+1)!, so rescale by order!.
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        """Demo function f(y) = y**6."""
        return y**2 * y**4

    # f''(9) = 30 * 9**4 = 196830. The original defined a module-level demo
    # that shadowed `__snake_case` (the differentiate function above),
    # referenced an undefined `y`, and called undefined names
    # `differentiate` / `f`; the demo now lives under the main guard and calls
    # differentiate by its actual in-file name.
    print(__snake_case(f, 9, 2))
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
def __init__( self : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]):
'''simple docstring'''
snake_case__ = params
snake_case__ = np.array(UpperCamelCase__)
snake_case__ = np.array([len(UpperCamelCase__) for t in data])
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : Dict , UpperCamelCase__ : Any):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self : Union[str, Any]):
'''simple docstring'''
return len(self.lengths)
def __magic_name__ ( self : str):
'''simple docstring'''
assert len(self.token_ids) == len(self.lengths)
assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
def __magic_name__ ( self : Optional[int]):
'''simple docstring'''
snake_case__ = self.params.max_model_input_size
snake_case__ = self.lengths > max_len
logger.info(F'''Splitting {sum(UpperCamelCase__)} too long sequences.''')
def divide_chunks(UpperCamelCase__ : str , UpperCamelCase__ : Tuple):
return [l[i : i + n] for i in range(0 , len(UpperCamelCase__) , UpperCamelCase__)]
snake_case__ = []
snake_case__ = []
if self.params.mlm:
snake_case__ , snake_case__ = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
snake_case__ , snake_case__ = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_)
new_lengths.append(len_)
else:
snake_case__ = []
for sub_s in divide_chunks(seq_ , max_len - 2):
if sub_s[0] != cls_id:
snake_case__ = np.insert(UpperCamelCase__ , 0 , UpperCamelCase__)
if sub_s[-1] != sep_id:
snake_case__ = np.insert(UpperCamelCase__ , len(UpperCamelCase__) , UpperCamelCase__)
assert len(UpperCamelCase__) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(UpperCamelCase__)
new_tok_ids.extend(UpperCamelCase__)
new_lengths.extend([len(UpperCamelCase__) for l in sub_seqs])
snake_case__ = np.array(UpperCamelCase__)
snake_case__ = np.array(UpperCamelCase__)
def __magic_name__ ( self : Any):
'''simple docstring'''
snake_case__ = len(self)
snake_case__ = self.lengths > 1_1
snake_case__ = self.token_ids[indices]
snake_case__ = self.lengths[indices]
snake_case__ = len(self)
logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''')
def __magic_name__ ( self : List[str]):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
snake_case__ = self.params.special_tok_ids["""unk_token"""]
snake_case__ = len(self)
snake_case__ = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
snake_case__ = (unk_occs / self.lengths) < 0.5
snake_case__ = self.token_ids[indices]
snake_case__ = self.lengths[indices]
snake_case__ = len(self)
logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''')
def __magic_name__ ( self : Optional[Any]):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(F'''{len(self)} sequences''')
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __magic_name__ ( self : int , UpperCamelCase__ : Optional[int]):
'''simple docstring'''
snake_case__ = [t[0] for t in batch]
snake_case__ = [t[1] for t in batch]
assert len(UpperCamelCase__) == len(UpperCamelCase__)
# Max for paddings
snake_case__ = max(UpperCamelCase__)
# Pad token ids
if self.params.mlm:
snake_case__ = self.params.special_tok_ids["""pad_token"""]
else:
snake_case__ = self.params.special_tok_ids["""unk_token"""]
snake_case__ = [list(t.astype(UpperCamelCase__)) + [pad_idx] * (max_seq_len_ - len(UpperCamelCase__)) for t in token_ids]
assert len(tk_) == len(UpperCamelCase__)
assert all(len(UpperCamelCase__) == max_seq_len_ for t in tk_)
snake_case__ = torch.tensor(tk_) # (bs, max_seq_len_)
snake_case__ = torch.tensor(UpperCamelCase__) # (bs)
return tk_t, lg_t
| 654 | 0 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 3_00  # TEMPERATURE (unit = K)
a_ = T  # backward-compatible alias for the original (obfuscated) module name


def a( donor_conc , acceptor_conc , intrinsic_conc ) -> float:
    """Return the built-in voltage (in volts) of a p-n junction at temperature ``T``.

    V_bi = (k*T / q) * ln(N_d * N_a / n_i**2)

    NOTE(review): the original declared all three parameters as ``A__`` (a
    SyntaxError) and referenced an undefined ``T``; names restored from the
    error messages below.

    Args:
        donor_conc: donor concentration N_d (must be positive and > n_i).
        acceptor_conc: acceptor concentration N_a (must be positive and > n_i).
        intrinsic_conc: intrinsic carrier concentration n_i (must be positive).

    Raises:
        ValueError: on any non-positive concentration, or when a doping
            concentration does not exceed the intrinsic concentration.
    """
    if donor_conc <= 0:
        raise ValueError('''Donor concentration should be positive''')
    elif acceptor_conc <= 0:
        raise ValueError('''Acceptor concentration should be positive''')
    elif intrinsic_conc <= 0:
        raise ValueError('''Intrinsic concentration should be positive''')
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            '''Donor concentration should be greater than intrinsic concentration''' )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            '''Acceptor concentration should be greater than intrinsic concentration''' )
    else:
        # k*T*ln(...) is in joules; dividing by the electron-volt value
        # converts the potential energy to volts.
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def _UpperCAmelCase ( a : str ):
if "model" in orig_key:
snake_case__ = orig_key.replace("""model.""" , """""" )
if "norm1" in orig_key:
snake_case__ = orig_key.replace("""norm1""" , """attention.output.LayerNorm""" )
if "norm2" in orig_key:
snake_case__ = orig_key.replace("""norm2""" , """output.LayerNorm""" )
if "norm" in orig_key:
snake_case__ = orig_key.replace("""norm""" , """LayerNorm""" )
if "transformer" in orig_key:
snake_case__ = orig_key.split(""".""" )[0].split("""_""" )[-1]
snake_case__ = orig_key.replace(F'''transformer_{layer_num}''' , F'''encoder.layer.{layer_num}''' )
if "mha.attn" in orig_key:
snake_case__ = orig_key.replace("""mha.attn""" , """attention.self""" )
if "mha" in orig_key:
snake_case__ = orig_key.replace("""mha""" , """attention""" )
if "W_q" in orig_key:
snake_case__ = orig_key.replace("""W_q""" , """self.query""" )
if "W_k" in orig_key:
snake_case__ = orig_key.replace("""W_k""" , """self.key""" )
if "W_v" in orig_key:
snake_case__ = orig_key.replace("""W_v""" , """self.value""" )
if "ff1" in orig_key:
snake_case__ = orig_key.replace("""ff1""" , """intermediate.dense""" )
if "ff2" in orig_key:
snake_case__ = orig_key.replace("""ff2""" , """output.dense""" )
if "ff" in orig_key:
snake_case__ = orig_key.replace("""ff""" , """output.dense""" )
if "mlm_class" in orig_key:
snake_case__ = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""" )
if "mlm" in orig_key:
snake_case__ = orig_key.replace("""mlm""" , """cls.predictions.transform""" )
if "cls" not in orig_key:
snake_case__ = """yoso.""" + orig_key
return orig_key
def _UpperCAmelCase ( a : Tuple , a : Dict ):
for key in orig_state_dict.copy().keys():
snake_case__ = orig_state_dict.pop(a )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
snake_case__ = val
snake_case__ = orig_state_dict["""cls.predictions.decoder.bias"""]
snake_case__ = torch.arange(a ).expand((1, -1) ) + 2
return orig_state_dict
def _UpperCAmelCase ( checkpoint_path , config_file , pytorch_dump_path ):
    """Convert an original YOSO checkpoint into a saved ``YosoForMaskedLM``.

    NOTE(review): the original declared all three parameters as ``a`` (a
    SyntaxError) and bound every intermediate to a throwaway local; names
    restored from the argparse flags below. ``convert_checkpoint_helper`` is
    kept as in the original, but that name is not defined in this file (the
    helper above is obfuscation-named ``_UpperCAmelCase``) — confirm before
    running.
    """
    orig_state_dict = torch.load(checkpoint_path , map_location="""cpu""" )["""model_state_dict"""]
    config = YosoConfig.from_json_file(config_file )
    model = YosoForMaskedLM(config )

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict )
    # load_state_dict returns the missing/unexpected-keys report.
    print(model.load_state_dict(new_state_dict ) )
    model.eval()
    model.save_pretrained(pytorch_dump_path )
    print(F'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' )
if __name__ == "__main__":
    # NOTE(review): the original bound the parser and the parsed args to a
    # throwaway name (``a__``) while reading ``parser``/``args``, and called
    # the undefined ``convert_yoso_checkpoint``; the converter defined above
    # is invoked by its in-file name instead.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The json file for YOSO model config.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    _UpperCAmelCase(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
import baseaa
def lowercase ( __A : str ) -> bytes:
    """Encode a UTF-8 string to Base85 (RFC 1924 ``b85``) bytes.

    Fixes vs. the original: the module-level ``import baseaa`` names a
    nonexistent module (a corrupted spelling of ``base64``), and the body
    encoded the undefined name ``string`` instead of the parameter.
    """
    import base64  # local import: the file-level ``baseaa`` import is unusable

    return base64.b85encode(__A.encode("""utf-8""" ) )
def lowercase ( __A : bytes ) -> str:
    """Decode Base85 (RFC 1924 ``b85``) bytes back to a UTF-8 string.

    Fixes vs. the original: the module-level ``import baseaa`` names a
    nonexistent module (a corrupted spelling of ``base64``).
    """
    import base64  # local import: the file-level ``baseaa`` import is unusable

    return base64.b85decode(__A ).decode("""utf-8""" )
if __name__ == "__main__":
    # NOTE(review): this demo is broken — ``baseaa_encode`` / ``baseaa_decode``
    # are not defined anywhere in this file (both helpers above are named
    # ``lowercase``, the second shadowing the first), and the values are bound
    # to ``__lowercase`` while ``test`` / ``encoded`` / ``encoded`` are read.
    # Running this as a script raises NameError; confirm intended names before
    # repairing.
    __lowercase : int = '''Hello World!'''
    __lowercase : Union[str, Any] = baseaa_encode(test)
    print(encoded)
    __lowercase : Any = baseaa_decode(encoded)
    print(decoded)
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( lowercase_ ):
    """Read-only fsspec filesystem that exposes one compressed file as a single
    uncompressed virtual file at the root.

    NOTE(review): obfuscation damage — every class attribute is named
    ``_lowercase`` (later bindings shadow earlier ones, so only the last
    survives), ``__init__`` declares duplicate ``UpperCamelCase__`` parameters
    (a SyntaxError) and binds its results to throwaway locals instead of
    ``self.file`` / ``self.compressed_name`` / ``self.uncompressed_name`` /
    ``self.dir_cache``, and the three accessor methods all share the name
    ``__magic_name__``. The intended attribute names are visible in the method
    bodies below; repairing them requires coordinated renames across the
    subclasses in this file.
    """
    _lowercase : Optional[int] = ''''''
    _lowercase : str = (
        None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    _lowercase : str = None # compression type in fsspec. ex: "gzip"
    _lowercase : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
    def __init__( self : List[Any] , UpperCamelCase__ : str = "" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[dict] = None , **UpperCamelCase__ : List[Any]):
        '''Open the target (possibly remote) file for buffered binary reads.'''
        super().__init__(self , **UpperCamelCase__)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        snake_case__ = fsspec.open(
            UpperCamelCase__ , mode="""rb""" , protocol=UpperCamelCase__ , compression=self.compression , client_kwargs={
                """requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
                """trust_env""": True, # Enable reading proxy env variables.
                **(target_options or {}).pop("""client_kwargs""" , {}), # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        snake_case__ = os.path.basename(self.file.path.split("""::""")[0])
        snake_case__ = (
            self.compressed_name[: self.compressed_name.rindex(""".""")]
            if """.""" in self.compressed_name
            else self.compressed_name
        )
        snake_case__ = None
    @classmethod
    def __magic_name__ ( cls : Union[str, Any] , UpperCamelCase__ : List[Any]):
        '''Strip the protocol prefix; compressed FSs use "" as the root marker.'''
        return super()._strip_protocol(UpperCamelCase__).lstrip("""/""")
    def __magic_name__ ( self : Dict):
        '''Populate the single-entry directory cache on first use.'''
        if self.dir_cache is None:
            snake_case__ = {**self.file.fs.info(self.file.path), """name""": self.uncompressed_name}
            # NOTE(review): `f` is undefined here — the info dict above was
            # bound to a throwaway local by the obfuscation.
            snake_case__ = {f["""name"""]: f}
    def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : str):
        '''Return the whole decompressed content as bytes.'''
        return self.file.open().read()
    def __magic_name__ ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : str = "rb" , UpperCamelCase__ : Any=None , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : Optional[Any] , ):
        '''Open the decompressed stream; only binary read mode is supported.'''
        snake_case__ = self._strip_protocol(UpperCamelCase__)
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''')
        return self.file.open()
class _lowerCAmelCase ( lowercase_ ):
    """Bzip2 single-file filesystem: the three values presumably map to the
    base class's protocol / fsspec-compression / extension slots (all
    attributes are obfuscation-named ``_lowercase``, so only the last binding
    survives — confirm against the base class)."""
    _lowercase : Dict = '''bz2'''
    _lowercase : Dict = '''bz2'''
    _lowercase : Optional[int] = '''.bz2'''
class _lowerCAmelCase ( lowercase_ ):
    """Gzip single-file filesystem: protocol / fsspec-compression / extension
    values (attribute names lost to obfuscation; only the last ``_lowercase``
    binding survives at runtime)."""
    _lowercase : Dict = '''gzip'''
    _lowercase : List[str] = '''gzip'''
    _lowercase : Any = '''.gz'''
class _lowerCAmelCase ( lowercase_ ):
    """LZ4 single-file filesystem: protocol / fsspec-compression / extension
    values (attribute names lost to obfuscation; only the last ``_lowercase``
    binding survives at runtime)."""
    _lowercase : str = '''lz4'''
    _lowercase : List[Any] = '''lz4'''
    _lowercase : Dict = '''.lz4'''
class _lowerCAmelCase ( lowercase_ ):
    """XZ single-file filesystem: protocol / fsspec-compression / extension
    values (attribute names lost to obfuscation; only the last ``_lowercase``
    binding survives at runtime)."""
    _lowercase : Optional[int] = '''xz'''
    _lowercase : Union[str, Any] = '''xz'''
    _lowercase : Optional[int] = '''.xz'''
class _lowerCAmelCase ( lowercase_ ):
    """Zstandard single-file filesystem. Wraps the decompression reader so its
    ``__enter__`` result proxies attribute access, working around a read-only
    ``close`` attribute in fsspec==2021.7.0 / zstandard==0.15.2.

    NOTE(review): obfuscation damage — ``__init__`` declares duplicate
    ``UpperCamelCase__`` parameters (a SyntaxError) and binds both the saved
    ``__enter__`` (read later as ``_enter``) and the patched replacement to a
    throwaway local instead of ``self.file.__enter__``; the nested wrapper
    stores its file in a throwaway local rather than ``self._file`` and is
    referenced by the undefined name ``WrappedFile``.
    """
    _lowercase : Optional[int] = '''zstd'''
    _lowercase : Tuple = '''zstd'''
    _lowercase : Union[str, Any] = '''.zst'''
    def __init__( self : str , UpperCamelCase__ : str , UpperCamelCase__ : str = "rb" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[dict] = None , UpperCamelCase__ : int = DEFAULT_BLOCK_SIZE , **UpperCamelCase__ : int , ):
        '''Initialize the base compressed filesystem, then monkey-patch __enter__.'''
        super().__init__(
            fo=UpperCamelCase__ , mode=UpperCamelCase__ , target_protocol=UpperCamelCase__ , target_options=UpperCamelCase__ , block_size=UpperCamelCase__ , **UpperCamelCase__ , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        snake_case__ = self.file.__enter__
        class _lowerCAmelCase :
            """Attribute-forwarding proxy around the decompression reader."""
            def __init__( self : Tuple , UpperCamelCase__ : str):
                '''Store the wrapped file object.'''
                snake_case__ = file_
            def __enter__( self : List[str]):
                '''Enter the wrapped file's context but return the proxy.'''
                self._file.__enter__()
                return self
            def __exit__( self : Dict , *UpperCamelCase__ : str , **UpperCamelCase__ : Optional[Any]):
                '''Delegate context exit to the wrapped file.'''
                self._file.__exit__(*UpperCamelCase__ , **UpperCamelCase__)
            def __iter__( self : Any):
                '''Iterate the wrapped file.'''
                return iter(self._file)
            def __magic_name__ ( self : List[str]):
                '''Advance the wrapped file iterator (presumably __next__).'''
                return next(self._file)
            def __getattr__( self : Any , UpperCamelCase__ : int):
                '''Forward any other attribute to the wrapped file.'''
                return getattr(self._file , UpperCamelCase__)
        def fixed_enter(*UpperCamelCase__ : int , **UpperCamelCase__ : int):
            # Replacement __enter__ that returns the proxy instead of the raw reader.
            return WrappedFile(_enter(*UpperCamelCase__ , **UpperCamelCase__))
        snake_case__ = fixed_enter
| 654 | 0 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class A__ :
    """Binary-tree node used by the coin-distribution routine below.

    The previous version assigned the placeholder name ``_lowercase`` three
    times (so the class had a single, meaningless attribute), while the
    traversal code reads ``node.data`` / ``node.left`` / ``node.right`` —
    these are restored as real dataclass fields.
    """

    data: int                  # number of coins stored at this node
    left: A__ | None = None    # left child (None for a missing subtree)
    right: A__ | None = None   # right child (None for a missing subtree)
UpperCamelCase : Union[str, Any] = namedtuple("""CoinsDistribResult""", """moves excess""")
# The traversal below constructs / annotates with the tuple's declared type
# name, so bind it explicitly as well (the obfuscated original only bound
# ``UpperCamelCase``, leaving ``CoinsDistribResult`` a NameError at runtime).
CoinsDistribResult = UpperCamelCase


def UpperCamelCase_ ( __a ) -> int:
    """Return the minimum number of moves to leave exactly one coin per node.

    ``__a`` is the tree root; each node's ``data`` is its coin count and one
    move shifts a single coin along one edge (LeetCode 979 semantics).

    Raises:
        ValueError: if the total number of coins differs from the number of
            nodes, in which case no valid distribution exists.
    """
    if __a is None:
        return 0

    # Validation — solvable only when #coins == #nodes.
    def count_nodes(node) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1

    def count_coins(node) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data

    if count_nodes(__a ) != count_coins(__a ):
        raise ValueError("The nodes number should be same as the number of coins" )

    # Main calculation — post-order walk.  For each subtree return
    # (moves performed inside it, coins it passes up to its parent:
    # positive = surplus pushed up, negative = deficit pulled down).
    def get_distrib(node) -> CoinsDistribResult:
        if node is None:
            # An absent child neither needs nor supplies coins: excess 1 makes
            # ``1 - excess`` vanish for the parent.
            return CoinsDistribResult(0 , 1 )

        left_distrib_moves, left_distrib_excess = get_distrib(node.left )
        right_distrib_moves, right_distrib_excess = get_distrib(node.right )

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        # Every coin crossing the edge to a child counts as one move.
        coins_distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        coins_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(coins_distrib_moves , coins_excess )

    return get_distrib(__a )[0]
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 37 |
def _UpperCAmelCase ( a : int ) -> bool:
    """Return True iff ``a`` is a power of two (1, 2, 4, 8, ...).

    Fixes two defects in the previous version: the body referenced the
    unbound name ``number`` instead of the parameter, and the bit trick
    ``a & (a - 1) == 0`` wrongly classified 0 as a power of two.

    Raises:
        ValueError: if ``a`` is negative.
    """
    if a < 0:
        raise ValueError("""number must not be negative""" )
    # A power of two has exactly one set bit: clearing the lowest set bit must
    # give zero, and the value itself must be non-zero.
    return a != 0 and a & (a - 1) == 0
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 654 | 0 |
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 38 |
class _lowerCAmelCase :
    """Fenwick-style tree for point update / range-max query (right-exclusive).

    NOTE(review): mechanical renaming has broken this class — all four methods
    share the name ``__magic_name__`` (later defs shadow earlier ones),
    ``__init__`` binds dead locals instead of ``self.size`` / ``self.arr`` /
    ``self.tree``, and the bodies call ``self.get_prev`` / ``self.get_next``
    and read ``index`` / ``value`` / ``left`` / ``right``, none of which are
    bound under those names.  Restore the upstream identifiers before use.
    """
    def __init__( self : List[Any] , UpperCamelCase__ : int):
        '''Allocate the value array and the tree array for ``size`` slots.'''
        # NOTE(review): `size` below is unbound (the parameter was renamed) and
        # the three assignments all go to the same throwaway local.
        snake_case__ = size
        snake_case__ = [0] * size
        snake_case__ = [0] * size
    @staticmethod
    def __magic_name__ ( UpperCamelCase__ : int):
        '''Next index whose tree node covers this one (upstream: get_next).'''
        return index | (index + 1)
    @staticmethod
    def __magic_name__ ( UpperCamelCase__ : int):
        '''End of the interval covered by this tree node (upstream: get_prev).'''
        return (index & (index + 1)) - 1
    def __magic_name__ ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : int):
        '''Point update: set position ``index`` to ``value`` and refresh the tree.'''
        # NOTE(review): duplicate parameter names above are a SyntaxError.
        snake_case__ = value
        while index < self.size:
            snake_case__ = self.get_prev(UpperCamelCase__) + 1
            if current_left_border == index:
                snake_case__ = value
            else:
                snake_case__ = max(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
            snake_case__ = self.get_next(UpperCamelCase__)
    def __magic_name__ ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int):
        '''Range-max query over [left, right) — note ``right`` is exclusive.'''
        # NOTE(review): duplicate parameter names above are a SyntaxError.
        right -= 1  # Because of right is exclusive
        snake_case__ = 0
        while left <= right:
            snake_case__ = self.get_prev(UpperCamelCase__)
            if left <= current_left:
                # Whole tree node lies inside the query range.
                snake_case__ = max(UpperCamelCase__ , self.tree[right])
                snake_case__ = current_left
            else:
                # Fall back to the raw value and step one position left.
                snake_case__ = max(UpperCamelCase__ , self.arr[right])
                right -= 1
        return result
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 654 | 0 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class snake_case_ ( __A , __A , unittest.TestCase ):
    '''Common-test harness for the TF MobileBERT model family (mixin base
    classes were obfuscated to ``__A``).'''
    # Architectures exercised by the shared model tests.
    SCREAMING_SNAKE_CASE : List[str] = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task -> model-class mapping for the pipeline tests.
    SCREAMING_SNAKE_CASE : List[Any] = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # NOTE(review): the four class attributes all share the name
    # SCREAMING_SNAKE_CASE, so later assignments overwrite earlier ones —
    # mechanical renaming damage; TODO restore the upstream attribute names.
    SCREAMING_SNAKE_CASE : int = False
    SCREAMING_SNAKE_CASE : Optional[int] = False

    def snake_case__( self : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Dict=False ) ->List[str]:
        # NOTE(review): duplicate parameter names are a SyntaxError, and
        # `return_labels` / `model_class` / `inputs_dict` below are unbound —
        # upstream signature is (inputs_dict, model_class, return_labels=False).
        snake_case_ = super()._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
        if return_labels:
            if model_class in get_values(_UpperCamelCase ):
                # Pretraining-style heads get all-zero labels for shape checks.
                snake_case_ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
        return inputs_dict
class snake_case_ ( __A ):
    '''Builds small random configs/inputs and runs per-architecture shape
    checks for the TF MobileBERT models.

    NOTE(review): mechanical renaming damage throughout — every local binds
    the dead name ``snake_case_`` (so attributes like ``self.batch_size`` read
    below are never set), and several signatures repeat the parameter name
    ``_UpperCamelCase``, which is a SyntaxError.  Restore upstream names
    before use.
    '''
    def __init__( self : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict=1_3 , _UpperCamelCase : Tuple=7 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : str=True , _UpperCamelCase : Any=True , _UpperCamelCase : Optional[Any]=True , _UpperCamelCase : str=9_9 , _UpperCamelCase : Tuple=3_2 , _UpperCamelCase : int=3_2 , _UpperCamelCase : int=2 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : Optional[int]=3_7 , _UpperCamelCase : List[str]="gelu" , _UpperCamelCase : List[str]=0.1 , _UpperCamelCase : Dict=0.1 , _UpperCamelCase : int=5_1_2 , _UpperCamelCase : Any=1_6 , _UpperCamelCase : Tuple=2 , _UpperCamelCase : List[str]=0.02 , _UpperCamelCase : Any=3 , _UpperCamelCase : List[Any]=4 , _UpperCamelCase : str=None , ) ->List[Any]:
        # Model/test hyperparameters (parent, batch size, sequence length, ...).
        snake_case_ = parent
        snake_case_ = batch_size
        snake_case_ = seq_length
        snake_case_ = is_training
        snake_case_ = use_input_mask
        snake_case_ = use_token_type_ids
        snake_case_ = use_labels
        snake_case_ = vocab_size
        snake_case_ = hidden_size
        snake_case_ = num_hidden_layers
        snake_case_ = num_attention_heads
        snake_case_ = intermediate_size
        snake_case_ = hidden_act
        snake_case_ = hidden_dropout_prob
        snake_case_ = attention_probs_dropout_prob
        snake_case_ = max_position_embeddings
        snake_case_ = type_vocab_size
        snake_case_ = type_sequence_label_size
        snake_case_ = initializer_range
        snake_case_ = num_labels
        snake_case_ = num_choices
        snake_case_ = scope
        snake_case_ = embedding_size
    def snake_case__( self : Optional[int] ) ->Optional[int]:
        # Build random ids, optional masks/labels, and a small MobileBertConfig.
        snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case_ = None
        if self.use_input_mask:
            snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
        snake_case_ = None
        if self.use_token_type_ids:
            snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        snake_case_ = None
        snake_case_ = None
        snake_case_ = None
        if self.use_labels:
            snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
        snake_case_ = MobileBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def snake_case__( self : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] ) ->Optional[Any]:
        # Base model: check last_hidden_state and pooler_output shapes.
        snake_case_ = TFMobileBertModel(config=_UpperCamelCase )
        snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        snake_case_ = model(_UpperCamelCase )
        snake_case_ = [input_ids, input_mask]
        snake_case_ = model(_UpperCamelCase )
        snake_case_ = model(_UpperCamelCase )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def snake_case__( self : Dict , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : List[Any] , _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Optional[int] ) ->List[str]:
        # Masked-LM head: logits over the vocabulary per position.
        snake_case_ = TFMobileBertForMaskedLM(config=_UpperCamelCase )
        snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        snake_case_ = model(_UpperCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def snake_case__( self : Any , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Dict , _UpperCamelCase : List[Any] , _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] ) ->Optional[Any]:
        # Next-sentence prediction head: binary logits.
        snake_case_ = TFMobileBertForNextSentencePrediction(config=_UpperCamelCase )
        snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        snake_case_ = model(_UpperCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def snake_case__( self : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : List[Any] , _UpperCamelCase : Any , _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : Tuple , _UpperCamelCase : Any ) ->List[Any]:
        # Pretraining heads: MLM logits plus seq-relationship logits.
        snake_case_ = TFMobileBertForPreTraining(config=_UpperCamelCase )
        snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        snake_case_ = model(_UpperCamelCase )
        self.parent.assertEqual(
            result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def snake_case__( self : int , _UpperCamelCase : Any , _UpperCamelCase : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : int ) ->Any:
        # Sequence-classification head: one logit row per example.
        snake_case_ = self.num_labels
        snake_case_ = TFMobileBertForSequenceClassification(config=_UpperCamelCase )
        snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        snake_case_ = model(_UpperCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def snake_case__( self : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Dict , _UpperCamelCase : str ) ->Union[str, Any]:
        # Multiple-choice head: tile the inputs across the choice dimension.
        snake_case_ = self.num_choices
        snake_case_ = TFMobileBertForMultipleChoice(config=_UpperCamelCase )
        snake_case_ = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
        snake_case_ = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
        snake_case_ = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
        snake_case_ = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        snake_case_ = model(_UpperCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def snake_case__( self : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Any , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str ) ->str:
        # Token-classification head: one logit row per token.
        snake_case_ = self.num_labels
        snake_case_ = TFMobileBertForTokenClassification(config=_UpperCamelCase )
        snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        snake_case_ = model(_UpperCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def snake_case__( self : Optional[int] , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] ) ->str:
        # Question-answering head: start/end logits per token.
        snake_case_ = TFMobileBertForQuestionAnswering(config=_UpperCamelCase )
        snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        snake_case_ = model(_UpperCamelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def snake_case__( self : Union[str, Any] ) ->List[str]:
        # Repackage prepare_config_and_inputs() output as the common inputs dict.
        snake_case_ = self.prepare_config_and_inputs()
        (
            (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ),
        ) = config_and_inputs
        snake_case_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
    # Methods of the enclosing MobileBERT test-suite class (upstream: setUp,
    # test_config, and one test per architecture check above, plus a slow
    # from_pretrained smoke test).
    # NOTE(review): `_UpperCamelCase` in setUp and the
    # `TFMobileBertModelTest.TFMobileBertModelTester` reference are unbound /
    # obfuscation artifacts; upstream passes config_class=MobileBertConfig.
    def snake_case__( self : Tuple ) ->Optional[Any]:
        snake_case_ = TFMobileBertModelTest.TFMobileBertModelTester(self )
        snake_case_ = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=3_7 )
    def snake_case__( self : Optional[Any] ) ->Any:
        self.config_tester.run_common_tests()
    def snake_case__( self : str ) ->Dict:
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*_UpperCamelCase )
    def snake_case__( self : List[str] ) ->Optional[Any]:
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*_UpperCamelCase )
    def snake_case__( self : Optional[Any] ) ->List[str]:
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_UpperCamelCase )
    def snake_case__( self : Tuple ) ->Optional[Any]:
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_UpperCamelCase )
    def snake_case__( self : List[str] ) ->Union[str, Any]:
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*_UpperCamelCase )
    def snake_case__( self : Union[str, Any] ) ->Dict:
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*_UpperCamelCase )
    def snake_case__( self : Optional[Any] ) ->Union[str, Any]:
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_UpperCamelCase )
    def snake_case__( self : Union[str, Any] ) ->Tuple:
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*_UpperCamelCase )
    @slow
    def snake_case__( self : List[Any] ) ->int:
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            snake_case_ = TFMobileBertModel.from_pretrained(_UpperCamelCase )
            self.assertIsNotNone(_UpperCamelCase )
@require_tf
class snake_case_ ( unittest.TestCase ):
    '''Slow integration test comparing TF MobileBERT pretraining logits against
    reference values.'''
    @slow
    def snake_case__( self : Tuple ) ->List[Any]:
        # NOTE(review): `model`, `output` and the `_UpperCamelCase` arguments
        # below are unbound — renaming damage; the intent (load checkpoint,
        # run a 6-token batch, compare a 3x3 logits slice) is still visible.
        snake_case_ = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
        snake_case_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
        snake_case_ = model(_UpperCamelCase )[0]
        snake_case_ = [1, 6, 3_0_5_2_2]
        self.assertEqual(output.shape , _UpperCamelCase )
        # Reference slice of the expected prediction logits.
        snake_case_ = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 )
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _lowerCAmelCase :
    """Builds small random configs/inputs for the TF Pegasus tests.

    NOTE(review): mechanical renaming damage — the three class attributes all
    bind `_lowercase` (upstream: config class, config overrides, hidden
    activation), and locals bind the dead name `snake_case__`, so attributes
    such as ``self.batch_size`` read below are never actually set.
    """
    _lowercase : List[str] = PegasusConfig
    _lowercase : Union[str, Any] = {}
    _lowercase : Tuple = '''gelu'''
    def __init__( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int]=1_3 , UpperCamelCase__ : Any=7 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : int=9_9 , UpperCamelCase__ : Dict=3_2 , UpperCamelCase__ : str=2 , UpperCamelCase__ : int=4 , UpperCamelCase__ : Tuple=3_7 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : str=4_0 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : Dict=0 , ):
        '''Record the test hyperparameters (batch size, depths, token ids, ...).'''
        # NOTE(review): duplicate parameter names above are a SyntaxError.
        snake_case__ = parent
        snake_case__ = batch_size
        snake_case__ = seq_length
        snake_case__ = is_training
        snake_case__ = use_labels
        snake_case__ = vocab_size
        snake_case__ = hidden_size
        snake_case__ = num_hidden_layers
        snake_case__ = num_attention_heads
        snake_case__ = intermediate_size
        snake_case__ = hidden_dropout_prob
        snake_case__ = attention_probs_dropout_prob
        snake_case__ = max_position_embeddings
        snake_case__ = eos_token_id
        snake_case__ = pad_token_id
        snake_case__ = bos_token_id
    def __magic_name__ ( self : Optional[Any]):
        '''Build random encoder/decoder ids (EOS-terminated), a config, and the
        matching inputs dict.'''
        snake_case__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
        snake_case__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
        snake_case__ = tf.concat([input_ids, eos_tensor] , axis=1)
        snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        snake_case__ = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        snake_case__ = prepare_pegasus_inputs_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        return config, inputs_dict
    def __magic_name__ ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]):
        '''Check that decoding with a cached past matches decoding from scratch.'''
        snake_case__ = TFPegasusModel(config=UpperCamelCase__).get_decoder()
        snake_case__ = inputs_dict["""input_ids"""]
        snake_case__ = input_ids[:1, :]
        snake_case__ = inputs_dict["""attention_mask"""][:1, :]
        snake_case__ = inputs_dict["""head_mask"""]
        snake_case__ = 1
        # first forward pass
        snake_case__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , head_mask=UpperCamelCase__ , use_cache=UpperCamelCase__)
        snake_case__ , snake_case__ = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size)
        snake_case__ = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta)
        # append to next input_ids and
        snake_case__ = tf.concat([input_ids, next_tokens] , axis=-1)
        snake_case__ = tf.concat([attention_mask, next_attn_mask] , axis=-1)
        snake_case__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__)[0]
        snake_case__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__)[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
        # select random slice
        snake_case__ = int(ids_tensor((1,) , output_from_past.shape[-1]))
        snake_case__ = output_from_no_past[:, -3:, random_slice_idx]
        snake_case__ = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(UpperCamelCase__ , UpperCamelCase__ , rtol=1E-3)
def _UpperCAmelCase ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    """Assemble the standard encoder/decoder input dict for the TF Pegasus tests.

    Any mask not supplied by the caller is synthesized:
      * ``attention_mask`` marks every non-pad token of ``input_ids``;
      * ``decoder_attention_mask`` always attends to the first decoder position
        and to non-pad tokens afterwards;
      * the three head masks default to all-ones.

    The previous version declared every parameter with the same name ``a``
    (a SyntaxError) and bound each synthesized mask to a dead placeholder; the
    names are restored from the body's own references and the returned dict
    keys.  ``tf.inta`` (a non-existent attribute) is restored to ``tf.int8``.
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class _lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
    """Common-test suite for TF Pegasus (mixin bases obfuscated to lowercase_)."""
    # NOTE(review): the class attributes below all collide on `_lowercase`
    # (upstream: all_model_classes, all_generative_model_classes,
    # pipeline_model_mapping, plus three boolean flags) — only the last
    # assignment survives.  Mechanical renaming damage.
    _lowercase : int = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    _lowercase : List[Any] = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    _lowercase : List[Any] = (
        {
            '''conversational''': TFPegasusForConditionalGeneration,
            '''feature-extraction''': TFPegasusModel,
            '''summarization''': TFPegasusForConditionalGeneration,
            '''text2text-generation''': TFPegasusForConditionalGeneration,
            '''translation''': TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    _lowercase : Optional[int] = True
    _lowercase : Dict = False
    _lowercase : Any = False
    def __magic_name__ ( self : str):
        '''Create the model tester and the config tester (upstream: setUp).'''
        snake_case__ = TFPegasusModelTester(self)
        snake_case__ = ConfigTester(self , config_class=UpperCamelCase__)
    def __magic_name__ ( self : List[Any]):
        '''Run the shared config sanity checks.'''
        self.config_tester.run_common_tests()
    def __magic_name__ ( self : Optional[int]):
        '''Exercise decoding with a cached past on the common inputs.'''
        snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__)
@require_sentencepiece
@require_tokenizers
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
    """Slow integration test: summarize two reference articles with
    google/pegasus-xsum and compare against expected summaries."""
    # Source articles fed to the summarizer (runtime data — unchanged).
    _lowercase : List[str] = [
        ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
        ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
    ]
    # Expected generated summaries for the two articles above.
    _lowercase : str = [
        '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
        ''' reduce the risk of wildfires.''',
        '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    # NOTE(review): the attributes above and below all collide on `_lowercase`
    # (upstream: src_text, expected_text, model_name) — renaming damage.
    _lowercase : int = '''google/pegasus-xsum'''
    @cached_property
    def __magic_name__ ( self : Dict):
        '''Tokenizer for the checkpoint under test (upstream: tokenizer property).'''
        return AutoTokenizer.from_pretrained(self.model_name)
    @cached_property
    def __magic_name__ ( self : int):
        '''Load the seq2seq model once per test class (upstream: model property).'''
        snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model
    def __magic_name__ ( self : Dict , **UpperCamelCase__ : List[Any]):
        '''Generate summaries and assert they equal the expected text.'''
        snake_case__ = self.translate_src_text(**UpperCamelCase__)
        assert self.expected_text == generated_words
    def __magic_name__ ( self : str , **UpperCamelCase__ : List[Any]):
        '''Tokenize the source articles, beam-search generate, and decode.'''
        snake_case__ = self.tokenizer(self.src_text , **UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="""tf""")
        snake_case__ = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCamelCase__ , )
        snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase__)
        return generated_words
    @slow
    def __magic_name__ ( self : List[str]):
        '''Entry point for the slow batch-generation check.'''
        self._assert_generated_batch_equal_expected()
| 654 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# Slow (sentencepiece) tokenizer is optional; fall back to None when absent.
if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    __UpperCAmelCase = None
# NOTE(review): every module-level constant below binds the same name
# __UpperCAmelCase (upstream: logger, VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES,
# FAIRSEQ_LANGUAGE_CODES) — only the last assignment survives.  Renaming
# damage; restore the distinct names before use.
__UpperCAmelCase = logging.get_logger(__name__)
# Expected on-disk file names for the slow and fast tokenizer assets.
__UpperCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
# Download URLs for the reference NLLB checkpoint's tokenizer files.
__UpperCAmelCase = {
    '''vocab_file''': {
        '''facebook/nllb-200-distilled-600M''': (
            '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
        ),
    },
    '''tokenizer_file''': {
        '''facebook/nllb-200-distilled-600M''': (
            '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
        ),
    },
}
# Maximum model input sizes per checkpoint.
__UpperCAmelCase = {
    '''facebook/nllb-large-en-ro''': 1_024,
    '''facebook/nllb-200-distilled-600M''': 1_024,
}
# FAIRSEQ-style language codes supported by NLLB-200.
# fmt: off
__UpperCAmelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', 
'''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class lowerCAmelCase_ ( a__ ):
    """Fast (Rust-backed) NLLB tokenizer.

    NOTE(review): identifiers in this block look machine-mangled.  Every class
    attribute below is assigned to the same name ``UpperCAmelCase__`` (each
    assignment overwrites the previous one), several signatures repeat the
    parameter name ``SCREAMING_SNAKE_CASE_`` (a SyntaxError in Python), and
    every method is named ``snake_case_``.  The bodies still read like the
    standard NLLB fast tokenizer, so the comments below document that apparent
    intent -- confirm against the upstream implementation before relying on
    them.
    """

    UpperCAmelCase__ : List[str] = VOCAB_FILES_NAMES
    UpperCAmelCase__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__ : str = ["input_ids", "attention_mask"]
    UpperCAmelCase__ : int = NllbTokenizer
    # Language prefix/suffix token-id lists, (re)filled by
    # set_src_lang_special_tokens / set_tgt_lang_special_tokens below.
    UpperCAmelCase__ : List[int] = []
    UpperCAmelCase__ : List[int] = []

    def __init__( self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_="<mask>", SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=False, **SCREAMING_SNAKE_CASE_, ) -> Union[str, Any]:
        # NOTE(review): duplicated parameter names above make this signature a
        # SyntaxError; the body reads the intended names (mask_token,
        # legacy_behaviour, additional_special_tokens, src_lang, tgt_lang,
        # vocab_file) directly.
        # Mask token behave like a normal word, i.e. include the space before it
        UpperCamelCase : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE_, lstrip=SCREAMING_SNAKE_CASE_, rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) else mask_token
        UpperCamelCase : Dict = legacy_behaviour
        super().__init__(
            vocab_file=SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, mask_token=SCREAMING_SNAKE_CASE_, src_lang=SCREAMING_SNAKE_CASE_, tgt_lang=SCREAMING_SNAKE_CASE_, additional_special_tokens=SCREAMING_SNAKE_CASE_, legacy_behaviour=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
        UpperCamelCase : Optional[int] = vocab_file
        # Can only re-save a slow tokenizer when the sentencepiece vocab file is known.
        UpperCamelCase : int = False if not self.vocab_file else True
        UpperCamelCase : int = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
        # Map each FLORES-200 language code (e.g. "eng_Latn") to its token id.
        UpperCamelCase : Optional[int] = {
            lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        # Default source language is English when none is supplied.
        UpperCamelCase : Optional[int] = src_lang if src_lang is not None else 'eng_Latn'
        UpperCamelCase : Optional[int] = self.convert_tokens_to_ids(self._src_lang )
        UpperCamelCase : Any = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )

    @property
    def snake_case_ ( self ) -> str:
        # Current source-language code (e.g. "eng_Latn").
        return self._src_lang

    @src_lang.setter
    def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> None:
        # Changing the source language re-installs the language special tokens.
        # NOTE(review): decorator references `src_lang` but the property above
        # is named `snake_case_` -- mangling artifact.
        UpperCamelCase : Dict = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
        # build_inputs_with_special_tokens: wrap ids with the current
        # language prefix/suffix token lists.
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
        # create_token_type_ids_from_sequences: NLLB uses no token types, so
        # the mask is all zeros for both single sequences and pairs.
        UpperCamelCase : Dict = [self.sep_token_id]
        UpperCamelCase : List[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Tuple:
        # _build_translation_inputs: tokenize with src_lang installed and
        # attach the target-language token id (used to force the decoder BOS).
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        UpperCamelCase : str = src_lang
        UpperCamelCase : List[str] = self(SCREAMING_SNAKE_CASE_, add_special_tokens=SCREAMING_SNAKE_CASE_, return_tensors=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
        UpperCamelCase : List[Any] = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase : Optional[Any] = tgt_lang_id
        return inputs

    def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = "eng_Latn", SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = "fra_Latn", **SCREAMING_SNAKE_CASE_, ) -> BatchEncoding:
        # prepare_seq2seq_batch with eng_Latn -> fra_Latn defaults.
        UpperCamelCase : Union[str, Any] = src_lang
        UpperCamelCase : str = tgt_lang
        return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )

    def snake_case_ ( self ) -> str:
        # _switch_to_input_mode: install source-language special tokens.
        return self.set_src_lang_special_tokens(self.src_lang )

    def snake_case_ ( self ) -> str:
        # _switch_to_target_mode: install target-language special tokens.
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> None:
        # set_src_lang_special_tokens: legacy behaviour puts the language code
        # after </s> as a suffix; current behaviour prefixes it instead.
        UpperCamelCase : List[Any] = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
        if self.legacy_behaviour:
            UpperCamelCase : Union[str, Any] = []
            UpperCamelCase : List[str] = [self.eos_token_id, self.cur_lang_code]
        else:
            UpperCamelCase : Optional[int] = [self.cur_lang_code]
            UpperCamelCase : List[str] = [self.eos_token_id]
        UpperCamelCase : int = self.convert_ids_to_tokens(self.prefix_tokens )
        UpperCamelCase : List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
        # Rebuild the backend post-processor so the new language tokens apply.
        UpperCamelCase : List[Any] = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )

    def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> None:
        # set_tgt_lang_special_tokens: mirror of the source-language variant.
        UpperCamelCase : List[str] = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
        if self.legacy_behaviour:
            UpperCamelCase : Optional[Any] = []
            UpperCamelCase : Any = [self.eos_token_id, self.cur_lang_code]
        else:
            UpperCamelCase : int = [self.cur_lang_code]
            UpperCamelCase : Tuple = [self.eos_token_id]
        UpperCamelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
        UpperCamelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
        UpperCamelCase : str = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )

    def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
        # save_vocabulary: copy the sentencepiece model file next to the fast
        # tokenizer files; returns the written path as a 1-tuple.
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" )
            return
        UpperCamelCase : Tuple = os.path.join(
            SCREAMING_SNAKE_CASE_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        # Skip the copy when saving onto the very same file.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ):
            copyfile(self.vocab_file, SCREAMING_SNAKE_CASE_ )
        return (out_vocab_file,)
| 40 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
# NOTE(review): all four module constants below are bound to the same mangled
# name ``a__`` (logger, vocab file names, pretrained map, max lyric lengths),
# so each assignment clobbers the previous one -- obfuscation artifact.
a__ = logging.get_logger(__name__)
# File names of the three JSON vocabularies the Jukebox tokenizer loads/saves.
a__ = {
    """artists_file""": """artists.json""",
    """lyrics_file""": """lyrics.json""",
    """genres_file""": """genres.json""",
}
# Hosted locations of the pretrained vocabulary files, per checkpoint.
a__ = {
    """artists_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
    },
    """genres_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
    },
    """lyrics_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
    },
}
# Maximum number of lyric tokens per checkpoint.
a__ = {
    """jukebox""": 5_1_2,
}
class _lowerCAmelCase ( lowercase_ ):
    """Jukebox tokenizer: encodes (artist, genres, lyrics) triples into id lists.

    NOTE(review): names are machine-mangled -- every local is ``snake_case__``,
    every parameter ``UpperCamelCase__`` (duplicated parameter names make
    several signatures SyntaxErrors), and the bodies read the *intended*
    parameter names directly.  Comments document the apparent intent; confirm
    against the upstream Jukebox tokenizer before relying on them.
    """

    _lowercase : str = VOCAB_FILES_NAMES
    _lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    _lowercase : str = PRETRAINED_LYRIC_TOKENS_SIZES
    _lowercase : Any = ['''input_ids''', '''attention_mask''']

    def __init__( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int=["v3", "v2", "v2"] , UpperCamelCase__ : List[str]=5_1_2 , UpperCamelCase__ : Union[str, Any]=5 , UpperCamelCase__ : List[Any]="<|endoftext|>" , **UpperCamelCase__ : List[Any] , ):
        '''Load the artist/genre/lyrics JSON vocabularies and build decoders.'''
        UpperCamelCase : Optional[Any] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else unk_token
        super().__init__(
            unk_token=UpperCamelCase__ , n_genres=UpperCamelCase__ , version=UpperCamelCase__ , max_n_lyric_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
        UpperCamelCase : Dict = version
        UpperCamelCase : Optional[Any] = max_n_lyric_tokens
        UpperCamelCase : Any = n_genres
        # Three token->id maps, loaded from the JSON vocabulary files.
        with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
            UpperCamelCase : str = json.load(UpperCamelCase__)
        with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
            UpperCamelCase : List[str] = json.load(UpperCamelCase__)
        with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
            UpperCamelCase : Dict = json.load(UpperCamelCase__)
        # Characters *outside* this class are treated as out-of-vocabulary.
        UpperCamelCase : List[Any] = R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 7_9:
            UpperCamelCase : Union[str, Any] = oov.replace(R"""\-'""" , R"""\-+'""")
        UpperCamelCase : int = regex.compile(UpperCamelCase__)
        # Inverse (id -> token) maps used when decoding.
        UpperCamelCase : List[str] = {v: k for k, v in self.artists_encoder.items()}
        UpperCamelCase : Dict = {v: k for k, v in self.genres_encoder.items()}
        UpperCamelCase : Tuple = {v: k for k, v in self.lyrics_encoder.items()}

    @property
    def __magic_name__ ( self : List[str]):
        '''Total vocabulary size across the three sub-vocabularies.'''
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def __magic_name__ ( self : Union[str, Any]):
        '''Return the combined vocabulary.

        NOTE(review): ``dict()`` takes at most one positional mapping -- three
        positional dict arguments raise TypeError at runtime; likely meant
        ``{"artists": ..., "genres": ..., "lyrics": ...}``.
        '''
        return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder)

    def __magic_name__ ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int):
        '''Convert artist names, genre lists and lyric characters to ids.

        Unknown artists/genres map to id 0; genre lists are right-padded with
        -1 up to ``self.n_genres``.
        '''
        UpperCamelCase : Tuple = [self.artists_encoder.get(UpperCamelCase__ , 0) for artist in list_artists]
        for genres in range(len(UpperCamelCase__)):
            UpperCamelCase : int = [self.genres_encoder.get(UpperCamelCase__ , 0) for genre in list_genres[genres]]
            UpperCamelCase : int = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        # Only the lyrics of the first (top-level) prior are character-encoded.
        UpperCamelCase : Optional[Any] = [[self.lyrics_encoder.get(UpperCamelCase__ , 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : Optional[int]):
        '''Lyrics are tokenized per character: just split the string.'''
        return list(UpperCamelCase__)

    def __magic_name__ ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , **UpperCamelCase__ : List[str]):
        '''Normalize the inputs, then character-tokenize the lyrics.'''
        UpperCamelCase : str = self.prepare_for_tokenization(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        UpperCamelCase : str = self._tokenize(UpperCamelCase__)
        return artist, genre, lyrics

    def __magic_name__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : bool = False):
        '''Normalize artist/genre names per model version and clean the lyrics.'''
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                # v3 keeps full casing-insensitive names.
                UpperCamelCase : Optional[int] = artists[idx].lower()
                UpperCamelCase : Any = [genres[idx].lower()]
            else:
                # v2 uses normalized names tagged with a ".v2" suffix.
                UpperCamelCase : List[str] = self._normalize(artists[idx]) + """.v2"""
                UpperCamelCase : Tuple = [
                    self._normalize(UpperCamelCase__) + """.v2""" for genre in genres[idx].split("""_""")
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            # v2 lyric vocabulary: explicit 80-character alphabet (id 0 = OOV).
            UpperCamelCase : Tuple = regex.compile(R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""")
            UpperCamelCase : Any = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
            UpperCamelCase : List[str] = {vocab[index]: index + 1 for index in range(len(UpperCamelCase__))}
            UpperCamelCase : Tuple = 0
            UpperCamelCase : List[str] = len(UpperCamelCase__) + 1
            UpperCamelCase : Tuple = self.vocab
            UpperCamelCase : Dict = {v: k for k, v in self.vocab.items()}
            UpperCamelCase : List[Any] = """"""
        else:
            # v3 additionally allows '+'.
            UpperCamelCase : List[str] = regex.compile(R"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""")
        UpperCamelCase : Tuple = self._run_strip_accents(UpperCamelCase__)
        UpperCamelCase : int = lyrics.replace("""\\""" , """\n""")
        # Strip OOV characters; genre/artist token lists stay empty here.
        UpperCamelCase : Optional[Any] = self.out_of_vocab.sub("""""" , UpperCamelCase__), [], []
        return artists, genres, lyrics

    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : str):
        '''Strip accents via NFD normalization, dropping combining marks (Mn).'''
        UpperCamelCase : Dict = unicodedata.normalize("""NFD""" , UpperCamelCase__)
        UpperCamelCase : List[str] = []
        for char in text:
            UpperCamelCase : Union[str, Any] = unicodedata.category(UpperCamelCase__)
            if cat == "Mn":
                continue
            output.append(UpperCamelCase__)
        return "".join(UpperCamelCase__)

    def __magic_name__ ( self : List[str] , UpperCamelCase__ : str):
        '''Normalize an artist/genre name to [a-z0-9.] with "_" separators.'''
        UpperCamelCase : Union[str, Any] = (
            [chr(UpperCamelCase__) for i in range(ord("""a""") , ord("""z""") + 1)]
            + [chr(UpperCamelCase__) for i in range(ord("""A""") , ord("""Z""") + 1)]
            + [chr(UpperCamelCase__) for i in range(ord("""0""") , ord("""9""") + 1)]
            + ["""."""]
        )
        UpperCamelCase : List[str] = frozenset(UpperCamelCase__)
        UpperCamelCase : Optional[int] = re.compile(R"""_+""")
        # Replace disallowed characters with "_", then collapse/trim "_" runs.
        UpperCamelCase : Tuple = """""".join([c if c in accepted else """_""" for c in text.lower()])
        UpperCamelCase : Any = pattern.sub("""_""" , UpperCamelCase__).strip("""_""")
        return text

    def __magic_name__ ( self : List[Any] , UpperCamelCase__ : List[str]):
        '''Join lyric tokens back into a display string.'''
        return " ".join(UpperCamelCase__)

    def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : bool = False):
        '''Convert nested lists to tensors of the requested framework.'''
        # Convert to TensorType for uniform handling.
        if not isinstance(UpperCamelCase__ , UpperCamelCase__):
            UpperCamelCase : Optional[Any] = TensorType(UpperCamelCase__)
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    """Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""")
            import tensorflow as tf

            UpperCamelCase : Optional[int] = tf.constant
            UpperCamelCase : str = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""")
            import torch

            UpperCamelCase : List[Any] = torch.tensor
            UpperCamelCase : Dict = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""")
            import jax.numpy as jnp  # noqa: F811

            UpperCamelCase : Any = jnp.array
            UpperCamelCase : Dict = _is_jax
        else:
            UpperCamelCase : str = np.asarray
            UpperCamelCase : str = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                UpperCamelCase : Optional[int] = [inputs]
            if not is_tensor(UpperCamelCase__):
                UpperCamelCase : Union[str, Any] = as_tensor(UpperCamelCase__)
        except:  # noqa E722
            raise ValueError(
                """Unable to create tensor, you should probably activate truncation and/or padding """
                """with 'padding=True' 'truncation=True' to have batched tensors with the same length.""")
        return inputs

    def __call__( self : str , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any="" , UpperCamelCase__ : Dict="pt"):
        '''Tokenize and encode one (artist, genres, lyrics) triple per version.'''
        UpperCamelCase : str = [0, 0, 0]
        # Replicate artist/genres for each of the three priors (versions).
        UpperCamelCase : Dict = [artist] * len(self.version)
        UpperCamelCase : int = [genres] * len(self.version)
        UpperCamelCase : Any = self.tokenize(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        UpperCamelCase : Tuple = self._convert_token_to_id(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        # Attention mask of -inf over the lyric tokens (additive masking).
        UpperCamelCase : Dict = [-INFINITY] * len(full_tokens[-1])
        UpperCamelCase : Optional[int] = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=UpperCamelCase__)
            for i in range(len(self.version))
        ]
        return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks})

    def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None):
        '''Write the three vocabularies as JSON files into ``save_directory``.'''
        if not os.path.isdir(UpperCamelCase__):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        UpperCamelCase : Optional[Any] = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""])
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.artists_encoder , ensure_ascii=UpperCamelCase__))
        UpperCamelCase : str = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""])
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.genres_encoder , ensure_ascii=UpperCamelCase__))
        UpperCamelCase : Dict = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""])
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.lyrics_encoder , ensure_ascii=UpperCamelCase__))
        return (artists_file, genres_file, lyrics_file)

    def __magic_name__ ( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str]):
        '''Decode (artist id, genre ids, lyric ids) back into strings.'''
        UpperCamelCase : Tuple = self.artists_decoder.get(UpperCamelCase__)
        UpperCamelCase : Dict = [self.genres_decoder.get(UpperCamelCase__) for genre in genres_index]
        UpperCamelCase : Any = [self.lyrics_decoder.get(UpperCamelCase__) for character in lyric_index]
        return artist, genres, lyrics
| 654 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both constants are bound to the same mangled name
# ``lowerCAmelCase__`` (logger, then the config archive map), so the second
# assignment clobbers the logger -- obfuscation artifact.
lowerCAmelCase__ = logging.get_logger(__name__)
# Map from checkpoint name to its hosted config.json.
lowerCAmelCase__ = {
    '''huggingface/time-series-transformer-tourism-monthly''': (
        '''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class lowercase_ (lowerCamelCase__ ):
    """Configuration class for a Time Series Transformer model.

    Stores the time-series-specific settings (prediction/context lengths,
    distribution head, lags, static/dynamic feature counts) plus the standard
    encoder/decoder Transformer hyper-parameters.

    NOTE(review): parameter names in ``__init__`` are machine-mangled (every
    one is ``lowercase__``, which duplicated is a SyntaxError); the body reads
    the intended names (prediction_length, context_length, ...) directly.
    """

    SCREAMING_SNAKE_CASE : Dict = 'time_series_transformer'
    # Map common config attribute names onto this model's native names.
    SCREAMING_SNAKE_CASE : List[str] = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }

    def __init__( self : Union[str, Any] ,lowercase__ : Optional[int] = None ,lowercase__ : Optional[int] = None ,lowercase__ : str = "student_t" ,lowercase__ : str = "nll" ,lowercase__ : int = 1 ,lowercase__ : List[int] = [1, 2, 3, 4, 5, 6, 7] ,lowercase__ : Optional[Union[str, bool]] = "mean" ,lowercase__ : int = 0 ,lowercase__ : int = 0 ,lowercase__ : int = 0 ,lowercase__ : int = 0 ,lowercase__ : Optional[List[int]] = None ,lowercase__ : Optional[List[int]] = None ,lowercase__ : int = 3_2 ,lowercase__ : int = 3_2 ,lowercase__ : int = 2 ,lowercase__ : int = 2 ,lowercase__ : int = 2 ,lowercase__ : int = 2 ,lowercase__ : bool = True ,lowercase__ : str = "gelu" ,lowercase__ : int = 6_4 ,lowercase__ : float = 0.1 ,lowercase__ : float = 0.1 ,lowercase__ : float = 0.1 ,lowercase__ : float = 0.1 ,lowercase__ : float = 0.1 ,lowercase__ : int = 1_0_0 ,lowercase__ : float = 0.0_2 ,lowercase__ : Any=True ,**lowercase__ : List[str] ,):
        # time series specific configuration
        __lowercase = prediction_length
        # Context window defaults to the prediction length when unspecified.
        __lowercase = context_length or prediction_length
        __lowercase = distribution_output
        __lowercase = loss
        __lowercase = input_size
        __lowercase = num_time_features
        __lowercase = lags_sequence
        __lowercase = scaling
        __lowercase = num_dynamic_real_features
        __lowercase = num_static_real_features
        __lowercase = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            # One cardinality entry is required per static categorical feature.
            if len(lowercase__ ) != num_static_categorical_features:
                raise ValueError(
                    '''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
            __lowercase = cardinality
        else:
            __lowercase = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(lowercase__ ) != num_static_categorical_features:
                raise ValueError(
                    '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
            __lowercase = embedding_dimension
        else:
            # Heuristic embedding size: half the cardinality, capped at 50.
            __lowercase = [min(5_0 ,(cat + 1) // 2 ) for cat in self.cardinality]
        __lowercase = num_parallel_samples
        # Transformer architecture configuration
        # Encoder/decoder input size: lagged values plus the extra features.
        __lowercase = input_size * len(lowercase__ ) + self._number_of_features
        __lowercase = d_model
        __lowercase = encoder_attention_heads
        __lowercase = decoder_attention_heads
        __lowercase = encoder_ffn_dim
        __lowercase = decoder_ffn_dim
        __lowercase = encoder_layers
        __lowercase = decoder_layers
        __lowercase = dropout
        __lowercase = attention_dropout
        __lowercase = activation_dropout
        __lowercase = encoder_layerdrop
        __lowercase = decoder_layerdrop
        __lowercase = activation_function
        __lowercase = init_std
        __lowercase = use_cache
        super().__init__(is_encoder_decoder=lowercase__ ,**lowercase__ )

    @property
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        # _number_of_features: count of per-time-step features appended to the
        # model input (embeddings + real/time features + loc/scale stats).
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 41 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class _lowerCAmelCase :
    """Resize images so the shorter edge matches a randomly sampled target
    length, capping the longer edge at ``max_size`` (ResizeShortestEdge).

    NOTE(review): locals are machine-mangled -- every assignment targets
    ``snake_case__`` while later lines read the intended names (h, w, size,
    scale, newh, neww, ...), and ``np.uinta`` below is almost certainly a
    mangled ``np.uint8`` (``np.uinta`` does not exist -> NameError).
    """

    def __init__( self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str]=sys.maxsize):
        '''Store the (min, max) short-edge sampling range and the long-edge cap.'''
        snake_case__ = """bilinear"""
        snake_case__ = max_size
        snake_case__ = short_edge_length

    def __call__( self : List[str] , UpperCamelCase__ : Tuple):
        '''Resize a list of HxWxC images; returns the resized list.'''
        snake_case__ = []
        for img in imgs:
            snake_case__ , snake_case__ = img.shape[:2]
            # later: provide list and randomly choose index for resize
            snake_case__ = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1)
            if size == 0:
                # NOTE(review): returns a single image mid-loop instead of
                # skipping the resize -- looks suspicious; confirm upstream.
                return img
            # Scale so the shorter edge equals the sampled size.
            snake_case__ = size * 1.0 / min(UpperCamelCase__ , UpperCamelCase__)
            if h < w:
                snake_case__ , snake_case__ = size, scale * w
            else:
                snake_case__ , snake_case__ = scale * h, size
            # Rescale again if the longer edge would exceed max_size.
            if max(UpperCamelCase__ , UpperCamelCase__) > self.max_size:
                snake_case__ = self.max_size * 1.0 / max(UpperCamelCase__ , UpperCamelCase__)
                snake_case__ = newh * scale
                snake_case__ = neww * scale
            snake_case__ = int(neww + 0.5)
            snake_case__ = int(newh + 0.5)
            if img.dtype == np.uinta:
                # uint8 arrays go through PIL; everything else through torch.
                snake_case__ = Image.fromarray(UpperCamelCase__)
                snake_case__ = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR)
                snake_case__ = np.asarray(UpperCamelCase__)
            else:
                snake_case__ = img.permute(2 , 0 , 1).unsqueeze(0)  # 3, 0, 1) # hw(c) -> nchw
                snake_case__ = nn.functional.interpolate(
                    UpperCamelCase__ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase__).squeeze(0)
            img_augs.append(UpperCamelCase__)
        return img_augs
class _lowerCAmelCase :
    """FRCNN-style preprocessing pipeline: resize to the configured test size,
    normalize with pixel mean/std, and pad a batch to a common shape.

    NOTE(review): locals are machine-mangled (every assignment targets
    ``snake_case__`` while later lines read the intended names), and the
    ``ResizeShortestEdge`` referenced below is presumably the class defined
    just above (here also renamed ``_lowerCAmelCase``) -- confirm upstream.
    """

    def __init__( self : Dict , UpperCamelCase__ : Optional[int]):
        '''Pull sizes, padding and normalization constants from the config.'''
        snake_case__ = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST)
        snake_case__ = cfg.INPUT.FORMAT
        snake_case__ = cfg.SIZE_DIVISIBILITY
        snake_case__ = cfg.PAD_VALUE
        snake_case__ = cfg.INPUT.MAX_SIZE_TEST
        snake_case__ = cfg.MODEL.DEVICE
        # Per-channel mean/std reshaped to (C, 1, 1) for broadcasting.
        snake_case__ = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        snake_case__ = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        snake_case__ = lambda UpperCamelCase__: (x - self.pixel_mean) / self.pixel_std

    def __magic_name__ ( self : Dict , UpperCamelCase__ : Dict):
        '''Pad all images to the per-dimension max; return (batch, sizes).'''
        snake_case__ = tuple(max(UpperCamelCase__) for s in zip(*[img.shape for img in images]))
        snake_case__ = [im.shape[-2:] for im in images]
        # Pad on the right/bottom only, so original content stays at (0, 0).
        snake_case__ = [
            nn.functional.pad(
                UpperCamelCase__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
            for size, im in zip(UpperCamelCase__ , UpperCamelCase__)
        ]
        return torch.stack(UpperCamelCase__), torch.tensor(UpperCamelCase__)

    def __call__( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : str=False):
        '''Full preprocessing: to-tensor, resize, normalize, pad, scale info.'''
        with torch.no_grad():
            if not isinstance(UpperCamelCase__ , UpperCamelCase__):
                snake_case__ = [images]
            if single_image:
                assert len(UpperCamelCase__) == 1
            for i in range(len(UpperCamelCase__)):
                # Accept either ready tensors or paths/arrays to be tensorized.
                if isinstance(images[i] , torch.Tensor):
                    images.insert(UpperCamelCase__ , images.pop(UpperCamelCase__).to(self.device).float())
                elif not isinstance(images[i] , torch.Tensor):
                    images.insert(
                        UpperCamelCase__ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase__) , input_format=self.input_format))
                        .to(self.device)
                        .float() , )
            # resize smallest edge
            snake_case__ = torch.tensor([im.shape[:2] for im in images])
            snake_case__ = self.aug(UpperCamelCase__)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            snake_case__ = [self.normalizer(UpperCamelCase__) for x in images]
            # now pad them to do the following operations
            snake_case__ , snake_case__ = self.pad(UpperCamelCase__)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            # Ratio of original size to resized size, used to rescale boxes.
            snake_case__ = torch.true_divide(UpperCamelCase__ , UpperCamelCase__)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _UpperCAmelCase ( a : Optional[Any] , a : Any ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _UpperCAmelCase ( a : Any , a : Tuple[int, int] ):
assert torch.isfinite(a ).all(), "Box tensor contains infinite or NaN!"
snake_case__ , snake_case__ = box_size
tensor[:, 0].clamp_(min=0 , max=a )
tensor[:, 1].clamp_(min=0 , max=a )
tensor[:, 2].clamp_(min=0 , max=a )
tensor[:, 3].clamp_(min=0 , max=a )
| 654 | 0 |
'''simple docstring'''
import os
import string
import sys
# Fix: every constant here was bound to the single mangled name `A_`, while
# the functions below read `ARROW_KEY_FLAG`, `KEYMAP`, `WIN_CH_BUFFER`,
# `WIN_KEYMAP`, `KEYMAP["arrow_begin"]` / `KEYMAP["arrow_end"]` and the digit
# keys -- none of which were ever bound (NameError/KeyError at runtime).
# The bindings below restore the names the consuming code requires.

# Flag OR-ed into arrow-key codes so they don't collide with plain characters.
ARROW_KEY_FLAG = 1 << 8

# Key name -> key code used by the terminal menu helpers.
KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

# Arrow codes are contiguous (up=65 .. left=68); the range bounds are used by
# get_char() to recognize escape sequences.
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    # Buffer of already-translated characters pending delivery, plus the map
    # from Windows two-byte scan codes to the portable key codes above.
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

# Digit keys map to their own character codes.
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def _UpperCamelCase ( ) -> int:
    """Read a single raw keystroke from the terminal (Windows or POSIX).

    NOTE(review): locals are machine-mangled -- assignments target
    ``lowerCamelCase_`` while later lines read the intended names (ch, cha,
    the 'mbcs' encoding, the tty file descriptor / saved attrs via
    ``__UpperCamelCase``), and the declared ``-> int`` looks like it should
    be ``-> str``.  Comments document the apparent intent.
    """
    if os.name == "nt":
        import msvcrt

        # Windows console encoding used to decode raw bytes.
        lowerCamelCase_ = 'mbcs'
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(__UpperCamelCase ) == 0:
            # Read the keystroke
            lowerCamelCase_ = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                lowerCamelCase_ = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    lowerCamelCase_ = chr(WIN_KEYMAP[cha] )
                    # Emit an ESC-[-<code> style sequence so the POSIX-style
                    # parser in get_char() can consume it.
                    WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
                    WIN_CH_BUFFER.append(__UpperCamelCase )
                    if ord(__UpperCamelCase ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(1_26 ) )
                    lowerCamelCase_ = chr(KEYMAP['esc'] )
                except KeyError:
                    # Unknown scan code: fall back to the second raw byte.
                    lowerCamelCase_ = cha[1]
            else:
                lowerCamelCase_ = ch.decode(__UpperCamelCase )
        else:
            # Deliver a previously buffered translated character first.
            lowerCamelCase_ = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty

        # Put the terminal into raw mode for a single read, then restore it.
        lowerCamelCase_ = sys.stdin.fileno()
        lowerCamelCase_ = termios.tcgetattr(__UpperCamelCase )
        try:
            tty.setraw(__UpperCamelCase )
            lowerCamelCase_ = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(__UpperCamelCase ,termios.TCSADRAIN ,__UpperCamelCase )
    return ch
def _UpperCamelCase ( ) -> str:
    """Read one logical key, translating ANSI arrow escape sequences.

    NOTE(review): this function shadows the previous one (both are mangled to
    ``_UpperCamelCase``) yet calls ``get_raw_chars()`` -- presumably the real
    name of the function above; locals assigned to ``lowerCamelCase_`` are
    read back as ``char`` / via ``__UpperCamelCase``.  Obfuscation artifacts.
    """
    lowerCamelCase_ = get_raw_chars()
    # Plain interrupt / newline pass straight through.
    if ord(__UpperCamelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(__UpperCamelCase ) == KEYMAP["esc"]:
        # Possible escape sequence: ESC [ <arrow-code>.
        lowerCamelCase_ = get_raw_chars()
        if ord(__UpperCamelCase ) == KEYMAP["mod_int"]:
            lowerCamelCase_ = get_raw_chars()
            # Map the raw arrow code into the flagged arrow-key range.
            if ord(__UpperCamelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__UpperCamelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(__UpperCamelCase ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            # Bare ESC followed by something else: read the next key instead.
            return get_raw_chars()
    else:
        # Only printable characters are returned; everything else is undefined.
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 42 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both constants are bound to the same mangled name ``a__``
# (logger, then the config archive map), so the second assignment clobbers
# the logger -- obfuscation artifact.
a__ = logging.get_logger(__name__)
# Map from checkpoint name to its hosted config.json.
a__ = {
    """microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _lowerCAmelCase ( PretrainedConfig ):
    """Configuration for a WavLM model.

    Fixes applied: the original `__init__` repeated one obfuscated parameter
    name (a SyntaxError), bound every value to a throwaway local instead of
    `self.*`, and inherited from the undefined name `lowercase_` instead of
    the imported `PretrainedConfig`. Defaults below are the ones visible in
    the original signature (close to `microsoft/wavlm-base`).
    """

    # Identifier used by config (de)serialization.
    model_type = "wavlm"
    # Backward-compatible alias for the obfuscated attribute name.
    _lowercase = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # Copy to lists so tuple defaults are never mutated in place.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # All three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # Product of the conv strides: input samples consumed per output frame.
        return functools.reduce(operator.mul, self.conv_stride, 1)

    # Backward-compatible alias for the obfuscated property name.
    __magic_name__ = inputs_to_logits_ratio
| 654 | 0 |
from collections.abc import Sequence
def _a ( SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if nums is None or not nums:
raise ValueError('''Input sequence should not be empty''' )
lowercase__ = nums[0]
for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
lowercase__ = nums[i]
lowercase__ = max(SCREAMING_SNAKE_CASE , ans + num , SCREAMING_SNAKE_CASE )
return ans
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Try on a sample input from the user.
    # Fix: both inputs were bound to the same obfuscated name while the code
    # read `n` and `array`; also call the function by its defined name `_a`
    # (the original called an undefined `max_subsequence_sum`).
    n = int(input('Enter number of elements : ').strip())
    array = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
    print(_a(array))
| 43 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( DiffusionPipeline ):
    """Unconditional image generation via score-based SDE-VE sampling.

    Runs predictor/corrector sampling: per scheduler timestep, several
    Langevin correction steps followed by one prediction step.

    Fixes applied: the original `__init__`/`__call__` repeated one parameter
    name (SyntaxError), bound every local to `snake_case__` while reading
    `sample`/`model`/`output`, and inherited from the undefined `lowercase_`
    instead of the imported `DiffusionPipeline`.
    """

    # Populated by `register_modules` in __init__.
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        """Sample `batch_size` images; returns ImagePipelineOutput or a tuple."""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        # Start from noise scaled to the scheduler's initial sigma.
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # Use the noise-free mean for the final image.
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
| 654 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
    """Holds DonutImageProcessor settings / dummy-input dimensions for the tests below.

    Fixes applied: the original `__init__` repeated the parameter name `__A`
    (SyntaxError) and bound every value to a local instead of `self.*`,
    leaving the attributes the test class reads unset.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],  # mutable default: read-only here, kept for compatibility
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Donut default target size when none is given.
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """Exercises DonutImageProcessor: attribute presence, `size` handling, and
    the PIL / NumPy / PyTorch call paths.

    Fixes applied: every method of the original was named `lowerCamelCase_`
    (so all but the last were clobbered), the class attribute did not match
    the `self.image_processing_class` reads in its own bodies, the base was
    the undefined name `A` instead of the imported mixin, and setUp
    referenced an undefined `DonutImageProcessingTester`. The class itself
    is renamed so it no longer clobbers the tester class above.
    """

    # Mixin hook: the image-processor class under test (None without vision deps).
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        # The settings holder is the (obfuscated-named) tester class defined above.
        self.image_processor_tester = UpperCAmelCase__(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        # Intentional no-op override in the original; presumably disables a
        # mixin test — NOTE(review): confirm the intended name against the mixin.
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCAmelCase ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    """Pipeline tests for IFInpaintingSuperResolutionPipeline.

    Fixes applied: all four class attributes were named `_lowercase` and all
    methods `__magic_name__` (each clobbering the previous, so only the last
    of each survived), `get_dummy_inputs` repeated its parameter name
    (SyntaxError), and the bases were the undefined `lowercase_` instead of
    the imported mixins. Attribute/method names restored to the
    PipelineTesterMixin conventions.
    """

    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic dummy inputs on `device` (16x16 image, 32x32 original/mask)."""
        if str(device).startswith("""mps"""):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """original_image""": original_image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available(),
        reason="""XFormers attention is only available with CUDA and `xformers` installed""",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != """cuda""", reason="""float16 requires CUDA""")
    def test_save_load_floataa(self):
        # Name kept as in the original file ("floataa" is its encoding of "float16").
        super().test_save_load_floataa(expected_max_diff=1E-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1E-2)
| 654 | 0 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    """Read boolean flag `key` from the environment.

    Returns `default` when unset; otherwise `strtobool`'s 0/1 result.
    Raises ValueError for unrecognized values.

    Fix: restored the name the module-level flags below actually call
    (the original def was obfuscated to `A`) and rebound the garbled locals.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"""If set, {key} must be yes or no.""")
    return _value
# Fix: all of these module-level values were bound to the single obfuscated
# name `UpperCamelCase`, clobbering one another, while the decorators below
# read `_run_slow_tests`, `_run_local_tests`, etc.
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lza = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_pyazr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)

# Backward-compatible alias: `UpperCamelCase`'s final original binding.
UpperCamelCase = require_not_windows
# Fix for this whole run of decorators: all 18 defs were named `A`, so each
# clobbered the previous and only the last was callable. Names restored to
# distinct, conventional `require_*` / run-mode decorators; bodies unchanged.
def require_faiss(test_case):
    """Skip `test_case` unless faiss is importable."""
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("""test requires faiss""")(test_case)
    return test_case


def require_regex(test_case):
    """Skip `test_case` unless regex is importable."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("""test requires regex""")(test_case)
    return test_case


def require_elasticsearch(test_case):
    """Skip `test_case` unless elasticsearch is importable."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("""test requires elasticsearch""")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    """Skip `test_case` unless sqlalchemy is importable."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("""test requires sqlalchemy""")(test_case)
    return test_case


def require_torch(test_case):
    """Skip `test_case` unless PyTorch is available per `config`."""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("""test requires PyTorch""")(test_case)
    return test_case


def require_tf(test_case):
    """Skip `test_case` unless TensorFlow is available per `config`."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("""test requires TensorFlow""")(test_case)
    return test_case


def require_jax(test_case):
    """Skip `test_case` unless JAX is available per `config`."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("""test requires JAX""")(test_case)
    return test_case


def require_pil(test_case):
    """Skip `test_case` unless Pillow is available per `config`."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("""test requires Pillow""")(test_case)
    return test_case


def require_transformers(test_case):
    """Skip `test_case` unless transformers is importable."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("""test requires transformers""")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    """Skip `test_case` unless tiktoken is importable."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("""test requires tiktoken""")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    """Skip `test_case` unless spacy is importable."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("""test requires spacy""")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    """Skip the decorated test unless spacy and the given spacy `model` load."""

    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("""test requires spacy""")(test_case)
        except OSError:
            return unittest.skip("""test requires spacy model '{}'""".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    """Skip `test_case` unless pyspark is importable."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("""test requires pyspark""")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    """Skip `test_case` unless joblibspark is importable."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("""test requires joblibspark""")(test_case)
    else:
        return test_case


def slow(test_case):
    """Skip `test_case` unless RUN_SLOW is enabled."""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("""test is slow""")(test_case)
    return test_case


def local(test_case):
    """Skip `test_case` unless RUN_LOCAL is enabled."""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("""test is local""")(test_case)
    return test_case


def packaged(test_case):
    """Skip `test_case` unless RUN_PACKAGED is enabled."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("""test is packaged""")(test_case)
    return test_case


def remote(test_case):
    """Skip `test_case` unless RUN_REMOTE is enabled."""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("""test requires remote""")(test_case)
    return test_case
def A ( *lowercase__ : List[Any] ) -> str:
def decorate(cls : Optional[Any] ):
for name, fn in cls.__dict__.items():
if callable(lowercase__ ) and name.startswith("""test""" ):
for decorator in decorators:
UpperCamelCase__ :Any = decorator(lowercase__ )
setattr(cls , lowercase__ , lowercase__ )
return cls
return decorate
class RequestWouldHangIndefinitelyError(Exception):
    """Raised by the offline simulator when a request has no timeout set.

    Fix: the original inherited from the undefined name `lowercase` and its
    obfuscated class name did not match the `RequestWouldHangIndefinitelyError`
    raised later in this module.
    """

    pass
class OfflineSimulationMode(Enum):
    """How the offline context manager simulates lost connectivity.

    Fix: the original inherited from the undefined `lowercase` and bound all
    three members to `_snake_case`, while the offline simulator reads
    `OfflineSimulationMode.CONNECTION_FAILS` etc.
    """

    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1E-16):
    """Simulate an offline environment for the duration of the `with` block.

    Fix: restored the garbled locals (`online_request`, `url`, kwargs
    mutation, error rewriting) and gave the def a usable name (it was the
    obfuscated `A`, clobbered by later defs).
    """
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = """https://10.255.255.1"""
        if kwargs.get("""timeout""") is None:
            raise RequestWouldHangIndefinitelyError(
                f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout."""
            )
        kwargs["""timeout"""] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("""10.255.255.1""", f"""OfflineMock[{url}]"""),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("""Offline mode is enabled.""", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("""requests.Session.send""", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("""requests.Session.request""", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("""datasets.config.HF_DATASETS_OFFLINE""", True):
            yield
    else:
        raise ValueError("""Please use a value from the OfflineSimulationMode enum.""")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    """Run the `with` block inside a fresh temporary cwd, then restore it.

    Fix: the original chdir'd to garbled obfuscated names instead of the
    temp dir / saved original directory, and was named `A`.
    """
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            # Always restore, even if the block raised.
            os.chdir(original_working_dir)
# Fix: both context managers were named `A` (second clobbered the first) and
# bound the baseline measurement to a garbled name the assert never read.
@contextmanager
def assert_arrow_memory_increases():
    """Assert that Arrow allocations grew during the `with` block."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    """Assert that Arrow allocations did not grow during the `with` block."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    """Return True iff two RNGs produce the same next 10 integers in [0, 100).

    Deep-copies both so neither generator's state is advanced.
    Fix: the original repeated one parameter name (SyntaxError).
    """
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502(func):
    """Decorator: xfail the test when the wrapped call raises HTTP 500/502.

    Fix: restored the garbled wrapper parameters (`func`, `*args`,
    `**kwargs`) and the error variable the handler inspects.
    """
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            # Server-side flakiness, not a test failure.
            if str(err).startswith("""500""") or str(err).startswith("""502"""):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Tuple , lowerCamelCase__ :Union[str, Any] ):
UpperCamelCase__ :Union[str, Any] = returncode
UpperCamelCase__ :Tuple = stdout
UpperCamelCase__ :List[Any] = stderr
async def A ( lowercase__ : Union[str, Any] , lowercase__ : List[Any] ) -> Dict:
while True:
UpperCamelCase__ :str = await stream.readline()
if line:
callback(lowercase__ )
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    """Run `cmd` asynchronously, streaming and capturing stdout/stderr lines.

    Fix: the original repeated one parameter name (SyntaxError) and bound
    the process handle and output sinks to garbled names the rest of the
    body never read.
    """
    if echo:
        print("""\nRunning: """, """ """.join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("""utf-8""").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    # NOTE(review): passing bare coroutines to asyncio.wait is rejected on
    # Python >= 3.11 — confirm the target runtime or wrap in create_task.
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="""stdout:""")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="""stderr:""")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """Run `cmd` to completion via `_stream_subprocess`; raise on failure or silence.

    Fix: the event loop, result, command string, and stderr text were all
    bound to garbled names the rest of the body never read.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = """ """.join(cmd)
    if result.returncode > 0:
        stderr = """\n""".join(result.stderr)
        raise RuntimeError(
            f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            f"""The combined stderr from workers follows:\n{stderr}"""
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"""'{cmd_str}' produced no output.""")

    return result
def pytest_xdist_worker_id() -> int:
    """Return the numeric pytest-xdist worker id (0 when not under xdist).

    Fix: restored the name the port helper below calls, and the garbled
    locals holding the env value and stripped id.
    """
    worker = os.environ.get("""PYTEST_XDIST_WORKER""", """gw0""")
    worker_id = re.sub(r"""^gw""", """""", worker, 0, re.M)
    return int(worker_id)
def get_torch_dist_unique_port() -> int:
    """Return 29500 offset by the xdist worker id, so parallel workers don't collide.

    Fixes: garbled locals restored; the return annotation was `str` but the
    function returns an int sum.
    """
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
# Fix: both lists were bound to the same obfuscated name `a__`, while
# `reversible_numbers` below reads EVEN_DIGITS and ODD_DIGITS.
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count reversible numbers of `length` digits (Project Euler 145).

    Fills `digits` symmetrically from the outside in; `remaining_length` is
    how many digit slots are still unassigned and `remainder` is the carry
    state of the pairwise digit sums checked so far.

    Fixes: the original signature repeated the parameter name `a` four times
    (a SyntaxError) and the digit-slot writes were garbled to a bare local.
    """
    if remaining_length == 0:
        # Leading/trailing zeros are not allowed.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        # Every pairwise digit sum (plus carry) must be odd.
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digita in range(10):
        digits[(length + remaining_length) // 2 - 1] = digita
        # The partner digit must give the pair sum the opposite parity.
        if (remainder + digita) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digitb in other_parity_digits:
            digits[(length - remaining_length) // 2] = digitb
            result += reversible_numbers(
                remaining_length - 2, (remainder + digita + digitb) // 10, digits, length, )
    return result
def solution(max_power: int = 9) -> int:
    """Count reversible numbers with at most `max_power` digits (Project Euler 145).

    Fix: restored the name `solution` that the main guard below calls (the
    original def was obfuscated to `_UpperCAmelCase`, clobbering the
    recursive helper's intended caller), and the garbled accumulator.
    """
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
    # Prints the Project Euler 145 count for the default 9-digit limit.
    # NOTE(review): `solution` is not bound under that name by the code above
    # as written (the def is obfuscated) — confirm the intended binding.
    print(F'''{solution() = }''')
| 654 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A_ :
    """Builds a tiny ConvBert config plus dummy inputs and runs shape checks
    for every TF ConvBert head.

    NOTE(review): the machine-transformed source dropped every ``self.``
    target in ``__init__`` and gave all methods the same name, so nothing was
    stored or callable; restored to the upstream ``TFConvBertModelTester``
    contract relied on by the test class below.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        # Values below are deliberately hard-coded (mirroring the original
        # body): the constructor arguments exist for API compatibility only.
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels and a matching ConvBertConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        # Exercise both the dict and the positional-list calling conventions.
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        # Duplicate each input along a new choice axis: (batch, num_choices, seq).
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Shape the prepared inputs into the dict form the common tests use."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


# Name the test class below uses to instantiate this helper.
TFConvBertModelTester = A_
@require_tf
class A_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common-mixin test suite for the TF ConvBert model family.

    NOTE(review): reconstructed — the transformed source had undefined base
    classes (``_a``), an undefined config class in ``setUp``, lost all
    ``test_*`` method names (breaking unittest discovery) and dropped every
    attribute-assignment target.
    """

    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        """Round-trip every head through tf.saved_model and re-check outputs."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, "use_cache"):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]
                self.assertEqual(len(outputs), num_out)
                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    # ConvBert halves the self-attention heads via its head ratio.
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        """Check attention tensors are exposed via inputs and via config."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class A_ ( unittest.TestCase ):
    """Slow integration test running the published ConvBert checkpoint.

    NOTE(review): reconstructed — the transformed source read ``output``
    without ever binding it, lost the ``test_*`` method name, and carried
    concatenation residue on its final line.
    """

    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        # Base-model hidden states: (batch, seq_len, hidden_size).
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
# Map from plain-English language names to NLLB-200 (FLORES-200) codes.
# The translation tool below looks names up here as `LANGUAGE_CODES`; the
# transformed source had bound the table only to `a__`, causing a NameError.
LANGUAGE_CODES = {
    """Acehnese Arabic""": """ace_Arab""",
    """Acehnese Latin""": """ace_Latn""",
    """Mesopotamian Arabic""": """acm_Arab""",
    """Ta'izzi-Adeni Arabic""": """acq_Arab""",
    """Tunisian Arabic""": """aeb_Arab""",
    """Afrikaans""": """afr_Latn""",
    """South Levantine Arabic""": """ajp_Arab""",
    """Akan""": """aka_Latn""",
    """Amharic""": """amh_Ethi""",
    """North Levantine Arabic""": """apc_Arab""",
    """Modern Standard Arabic""": """arb_Arab""",
    """Modern Standard Arabic Romanized""": """arb_Latn""",
    """Najdi Arabic""": """ars_Arab""",
    """Moroccan Arabic""": """ary_Arab""",
    """Egyptian Arabic""": """arz_Arab""",
    """Assamese""": """asm_Beng""",
    """Asturian""": """ast_Latn""",
    """Awadhi""": """awa_Deva""",
    """Central Aymara""": """ayr_Latn""",
    """South Azerbaijani""": """azb_Arab""",
    """North Azerbaijani""": """azj_Latn""",
    """Bashkir""": """bak_Cyrl""",
    """Bambara""": """bam_Latn""",
    """Balinese""": """ban_Latn""",
    """Belarusian""": """bel_Cyrl""",
    """Bemba""": """bem_Latn""",
    """Bengali""": """ben_Beng""",
    """Bhojpuri""": """bho_Deva""",
    """Banjar Arabic""": """bjn_Arab""",
    """Banjar Latin""": """bjn_Latn""",
    """Standard Tibetan""": """bod_Tibt""",
    """Bosnian""": """bos_Latn""",
    """Buginese""": """bug_Latn""",
    """Bulgarian""": """bul_Cyrl""",
    """Catalan""": """cat_Latn""",
    """Cebuano""": """ceb_Latn""",
    """Czech""": """ces_Latn""",
    """Chokwe""": """cjk_Latn""",
    """Central Kurdish""": """ckb_Arab""",
    """Crimean Tatar""": """crh_Latn""",
    """Welsh""": """cym_Latn""",
    """Danish""": """dan_Latn""",
    """German""": """deu_Latn""",
    """Southwestern Dinka""": """dik_Latn""",
    """Dyula""": """dyu_Latn""",
    """Dzongkha""": """dzo_Tibt""",
    """Greek""": """ell_Grek""",
    """English""": """eng_Latn""",
    """Esperanto""": """epo_Latn""",
    """Estonian""": """est_Latn""",
    """Basque""": """eus_Latn""",
    """Ewe""": """ewe_Latn""",
    """Faroese""": """fao_Latn""",
    """Fijian""": """fij_Latn""",
    """Finnish""": """fin_Latn""",
    """Fon""": """fon_Latn""",
    """French""": """fra_Latn""",
    """Friulian""": """fur_Latn""",
    """Nigerian Fulfulde""": """fuv_Latn""",
    """Scottish Gaelic""": """gla_Latn""",
    """Irish""": """gle_Latn""",
    """Galician""": """glg_Latn""",
    """Guarani""": """grn_Latn""",
    """Gujarati""": """guj_Gujr""",
    """Haitian Creole""": """hat_Latn""",
    """Hausa""": """hau_Latn""",
    """Hebrew""": """heb_Hebr""",
    """Hindi""": """hin_Deva""",
    """Chhattisgarhi""": """hne_Deva""",
    """Croatian""": """hrv_Latn""",
    """Hungarian""": """hun_Latn""",
    """Armenian""": """hye_Armn""",
    """Igbo""": """ibo_Latn""",
    """Ilocano""": """ilo_Latn""",
    """Indonesian""": """ind_Latn""",
    """Icelandic""": """isl_Latn""",
    """Italian""": """ita_Latn""",
    """Javanese""": """jav_Latn""",
    """Japanese""": """jpn_Jpan""",
    """Kabyle""": """kab_Latn""",
    """Jingpho""": """kac_Latn""",
    """Kamba""": """kam_Latn""",
    """Kannada""": """kan_Knda""",
    """Kashmiri Arabic""": """kas_Arab""",
    """Kashmiri Devanagari""": """kas_Deva""",
    """Georgian""": """kat_Geor""",
    """Central Kanuri Arabic""": """knc_Arab""",
    """Central Kanuri Latin""": """knc_Latn""",
    """Kazakh""": """kaz_Cyrl""",
    """Kabiyè""": """kbp_Latn""",
    """Kabuverdianu""": """kea_Latn""",
    """Khmer""": """khm_Khmr""",
    """Kikuyu""": """kik_Latn""",
    """Kinyarwanda""": """kin_Latn""",
    """Kyrgyz""": """kir_Cyrl""",
    """Kimbundu""": """kmb_Latn""",
    """Northern Kurdish""": """kmr_Latn""",
    """Kikongo""": """kon_Latn""",
    """Korean""": """kor_Hang""",
    """Lao""": """lao_Laoo""",
    """Ligurian""": """lij_Latn""",
    """Limburgish""": """lim_Latn""",
    """Lingala""": """lin_Latn""",
    """Lithuanian""": """lit_Latn""",
    """Lombard""": """lmo_Latn""",
    """Latgalian""": """ltg_Latn""",
    """Luxembourgish""": """ltz_Latn""",
    """Luba-Kasai""": """lua_Latn""",
    """Ganda""": """lug_Latn""",
    """Luo""": """luo_Latn""",
    """Mizo""": """lus_Latn""",
    """Standard Latvian""": """lvs_Latn""",
    """Magahi""": """mag_Deva""",
    """Maithili""": """mai_Deva""",
    """Malayalam""": """mal_Mlym""",
    """Marathi""": """mar_Deva""",
    """Minangkabau Arabic """: """min_Arab""",
    """Minangkabau Latin""": """min_Latn""",
    """Macedonian""": """mkd_Cyrl""",
    """Plateau Malagasy""": """plt_Latn""",
    """Maltese""": """mlt_Latn""",
    """Meitei Bengali""": """mni_Beng""",
    """Halh Mongolian""": """khk_Cyrl""",
    """Mossi""": """mos_Latn""",
    """Maori""": """mri_Latn""",
    """Burmese""": """mya_Mymr""",
    """Dutch""": """nld_Latn""",
    """Norwegian Nynorsk""": """nno_Latn""",
    """Norwegian Bokmål""": """nob_Latn""",
    """Nepali""": """npi_Deva""",
    """Northern Sotho""": """nso_Latn""",
    """Nuer""": """nus_Latn""",
    """Nyanja""": """nya_Latn""",
    """Occitan""": """oci_Latn""",
    """West Central Oromo""": """gaz_Latn""",
    """Odia""": """ory_Orya""",
    """Pangasinan""": """pag_Latn""",
    """Eastern Panjabi""": """pan_Guru""",
    """Papiamento""": """pap_Latn""",
    """Western Persian""": """pes_Arab""",
    """Polish""": """pol_Latn""",
    """Portuguese""": """por_Latn""",
    """Dari""": """prs_Arab""",
    """Southern Pashto""": """pbt_Arab""",
    """Ayacucho Quechua""": """quy_Latn""",
    """Romanian""": """ron_Latn""",
    """Rundi""": """run_Latn""",
    """Russian""": """rus_Cyrl""",
    """Sango""": """sag_Latn""",
    """Sanskrit""": """san_Deva""",
    """Santali""": """sat_Olck""",
    """Sicilian""": """scn_Latn""",
    """Shan""": """shn_Mymr""",
    """Sinhala""": """sin_Sinh""",
    """Slovak""": """slk_Latn""",
    """Slovenian""": """slv_Latn""",
    """Samoan""": """smo_Latn""",
    """Shona""": """sna_Latn""",
    """Sindhi""": """snd_Arab""",
    """Somali""": """som_Latn""",
    """Southern Sotho""": """sot_Latn""",
    """Spanish""": """spa_Latn""",
    """Tosk Albanian""": """als_Latn""",
    """Sardinian""": """srd_Latn""",
    """Serbian""": """srp_Cyrl""",
    """Swati""": """ssw_Latn""",
    """Sundanese""": """sun_Latn""",
    """Swedish""": """swe_Latn""",
    """Swahili""": """swh_Latn""",
    """Silesian""": """szl_Latn""",
    """Tamil""": """tam_Taml""",
    """Tatar""": """tat_Cyrl""",
    """Telugu""": """tel_Telu""",
    """Tajik""": """tgk_Cyrl""",
    """Tagalog""": """tgl_Latn""",
    """Thai""": """tha_Thai""",
    """Tigrinya""": """tir_Ethi""",
    """Tamasheq Latin""": """taq_Latn""",
    """Tamasheq Tifinagh""": """taq_Tfng""",
    """Tok Pisin""": """tpi_Latn""",
    """Tswana""": """tsn_Latn""",
    """Tsonga""": """tso_Latn""",
    """Turkmen""": """tuk_Latn""",
    """Tumbuka""": """tum_Latn""",
    """Turkish""": """tur_Latn""",
    """Twi""": """twi_Latn""",
    """Central Atlas Tamazight""": """tzm_Tfng""",
    """Uyghur""": """uig_Arab""",
    """Ukrainian""": """ukr_Cyrl""",
    """Umbundu""": """umb_Latn""",
    """Urdu""": """urd_Arab""",
    """Northern Uzbek""": """uzn_Latn""",
    """Venetian""": """vec_Latn""",
    """Vietnamese""": """vie_Latn""",
    """Waray""": """war_Latn""",
    """Wolof""": """wol_Latn""",
    """Xhosa""": """xho_Latn""",
    """Eastern Yiddish""": """ydd_Hebr""",
    """Yoruba""": """yor_Latn""",
    """Yue Chinese""": """yue_Hant""",
    """Chinese Simplified""": """zho_Hans""",
    """Chinese Traditional""": """zho_Hant""",
    """Standard Malay""": """zsm_Latn""",
    """Zulu""": """zul_Latn""",
}
# Backward-compatible alias for the name the transformed source used.
a__ = LANGUAGE_CODES
class _lowerCAmelCase ( PipelineTool ):
    """Tool that translates text between any two NLLB-200 languages.

    NOTE(review): reconstructed — the transformed source subclassed an
    undefined name, declared duplicate parameter names (a SyntaxError) and
    gave all three methods the same name; restored to the ``PipelineTool``
    contract (``encode`` -> ``forward`` -> ``decode``).
    """

    default_checkpoint = '''facebook/nllb-200-distilled-600M'''
    description = (
        '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
        '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
        '''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '''
        '''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
    )
    name = '''translator'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    # Plain-English language name -> NLLB (FLORES-200) code.
    lang_to_code = LANGUAGE_CODES
    inputs = ['''text''', '''text''', '''text''']
    outputs = ['''text''']

    def encode(self, text, src_lang, tgt_lang):
        """Validate both language names and build tokenized model inputs."""
        if src_lang not in self.lang_to_code:
            raise ValueError(F'''{src_lang} is not a supported language.''')
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F'''{tgt_lang} is not a supported language.''')
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="""pt""", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        """Run seq2seq generation on the tokenized inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Convert generated token ids back into plain text."""
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
# (removed: table-row artifact left over from file concatenation)
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCamelCase( PipelineTesterMixin , unittest.TestCase ):
    """Fast tests for ``KandinskyVaaPriorPipeline`` built from tiny dummy models.

    NOTE(review): reconstructed — the transformed source subclassed an
    undefined mixin name, dropped the assignment targets inside properties
    (values were bound to one clobbered local while the ``return`` read the
    real name) and lost all property/method names.
    """

    pipeline_class = KandinskyVaaPriorPipeline
    params = ['''prompt''']
    batch_params = ['''prompt''', '''negative_prompt''']
    required_optional_params = [
        '''num_images_per_prompt''',
        '''generator''',
        '''num_inference_steps''',
        '''latents''',
        '''negative_prompt''',
        '''guidance_scale''',
        '''output_type''',
        '''return_dict''',
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 3_2

    @property
    def time_input_dim(self):
        return 3_2

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 1_0_0

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=3_7,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_0_0_0,
        )
        return CLIPTextModelWithProjection(config )

    @property
    def dummy_prior(self):
        torch.manual_seed(0 )
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 1_2,
            'embedding_dim': self.text_embedder_hidden_size,
            'num_layers': 1,
        }
        model = PriorTransformer(**model_kwargs )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=2_2_4,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=3_7,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1_4,
        )
        return CLIPVisionModelWithProjection(config )

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=2_2_4,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073],
            image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711],
            resample=3,
            size=2_2_4,
        )
        return image_processor

    def get_dummy_components(self):
        """Assemble all dummy sub-models into a components dict."""
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type='fixed_small_log',
            prediction_type='sample',
            num_train_timesteps=1_0_0_0,
            clip_sample=True,
            clip_sample_range=10.0,
        )
        components = {
            'prior': prior,
            'image_encoder': image_encoder,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'scheduler': scheduler,
            'image_processor': image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0 ):
        # mps does not support device-bound generators.
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'horse',
            'generator': generator,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
        return inputs

    def test_kandinsky_prior(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ), return_dict=False, )[0]
        image_slice = image[0, -1_0:]
        image_from_tuple_slice = image_from_tuple[0, -1_0:]
        assert image.shape == (1, 3_2)
        expected_slice = np.array(
            [-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == 'cpu'
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
# (removed: table-row artifact left over from file concatenation)
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _UpperCAmelCase ( a : Optional[int] ):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _lowerCAmelCase ( nn.Module ):
    """A frozen module wrapped with a trainable low-rank (LoRA-style) adapter.

    The adapter is a bias-free two-layer bottleneck whose output is added to
    the wrapped module's output in the forward pass.
    NOTE(review): locals were mangled to ``snake_case__`` while later lines
    read ``self.adapter`` / ``self.module`` — presumably the assignments were
    ``self.module = ...`` / ``self.adapter = ...``; confirm upstream.
    """
    def __init__( self : int , UpperCamelCase__ : nn.Module , UpperCamelCase__ : int):
        """Build a rank-bottleneck adapter around `module`."""
        super().__init__()
        snake_case__ = module
        snake_case__ = nn.Sequential(
            nn.Linear(module.in_features , UpperCamelCase__ , bias=UpperCamelCase__) , nn.Linear(UpperCamelCase__ , module.out_features , bias=UpperCamelCase__) , )
        # Init std chosen so the down-projection variance scales with the
        # smaller of fan-in / fan-out; the up-projection starts at zero so the
        # adapter initially contributes nothing.
        snake_case__ = (2.0 / (5 * min(module.in_features , module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight , std=UpperCamelCase__)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)
    def __magic_name__ ( self : Tuple , UpperCamelCase__ : int , *UpperCamelCase__ : Dict , **UpperCamelCase__ : str):
        """Forward pass: frozen module output plus adapter output."""
        return self.module(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__) + self.adapter(UpperCamelCase__)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCAmelCase ( unittest.TestCase ):
    """Base fixture for 4-bit bitsandbytes tests.

    Holds the model name, the expected fp16/4-bit memory ratio, the prompt,
    the set of acceptable generations, and the generation length; loads the
    tokenizer in setUp.
    NOTE(review): the class attributes were mangled to ``_lowercase``, yet
    ``EXPECTED_OUTPUTS`` and ``self.model_name`` are read — presumably the
    originals were ``model_name`` / ``EXPECTED_RELATIVE_DIFFERENCE`` /
    ``input_text`` / ``EXPECTED_OUTPUTS`` / ``MAX_NEW_TOKENS``; confirm.
    """
    _lowercase : Dict = '''bigscience/bloom-1b7'''
    # Constant values
    _lowercase : Any = 2.109_6595_5269_2574
    _lowercase : Tuple = '''Hello my name is'''
    _lowercase : List[Any] = set()
    EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
    EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
    EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
    _lowercase : List[str] = 10
    def __magic_name__ ( self : Optional[int]):
        """Load the tokenizer once per test."""
        snake_case__ = AutoTokenizer.from_pretrained(self.model_name)
class _lowerCAmelCase ( lowercase_ ):
    """Core 4-bit quantization tests: config serialization, memory footprint,
    dtype layout, generation quality, and illegal-operation guards.

    NOTE(review): locals were mangled to ``snake_case__`` while later lines
    read ``self.model_fpaa`` / ``self.model_abit`` / ``config`` etc., and
    several flag arguments were replaced by the undefined ``UpperCamelCase__``
    — confirm all against the upstream test file.
    """
    def __magic_name__ ( self : str):
        """Load an fp16 reference model and a 4-bit quantized model."""
        super().setUp()
        # Models and tokenizer
        snake_case__ = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map="""auto""")
        snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
    def __magic_name__ ( self : Tuple):
        """Free both models and clear the CUDA cache between tests."""
        del self.model_fpaa
        del self.model_abit
        gc.collect()
        torch.cuda.empty_cache()
    def __magic_name__ ( self : str):
        """The quantized model's config must carry a serializable quantization_config."""
        snake_case__ = self.model_abit.config
        self.assertTrue(hasattr(UpperCamelCase__ , """quantization_config"""))
        snake_case__ = config.to_dict()
        snake_case__ = config.to_diff_dict()
        snake_case__ = config.to_json_string()
    def __magic_name__ ( self : Dict):
        """Quantization should shrink memory by the expected ratio and swap
        linear weights for bitsandbytes' 4-bit parameter class."""
        from bitsandbytes.nn import Paramsabit
        snake_case__ = self.model_fpaa.get_memory_footprint()
        snake_case__ = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE)
        snake_case__ = get_some_linear_layer(self.model_abit)
        self.assertTrue(linear.weight.__class__ == Paramsabit)
    def __magic_name__ ( self : Optional[int]):
        """Every quantized linear layer (except kept-in-fp32 / lm_head) should
        store its weights packed as uint8."""
        from transformers import TaPreTrainedModel
        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()
        for name, module in self.model_abit.named_modules():
            if isinstance(UpperCamelCase__ , torch.nn.Linear):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta)
    def __magic_name__ ( self : Dict):
        """Generation from the 4-bit model should match one of the expected texts."""
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        snake_case__ = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS)
    def __magic_name__ ( self : str):
        """Same generation check when quantization is driven by an explicit
        BitsAndBytesConfig passed to from_pretrained."""
        snake_case__ = BitsAndBytesConfig()
        snake_case__ = True
        snake_case__ = AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=UpperCamelCase__ , device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        snake_case__ = model_abit_from_config.generate(
            input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS)
    def __magic_name__ ( self : Optional[int]):
        """Saving a 4-bit model must raise."""
        with self.assertRaises(UpperCamelCase__), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(UpperCamelCase__)
    def __magic_name__ ( self : List[str]):
        """Passing both quantization_config and load_in_4bit kwargs must raise."""
        snake_case__ = BitsAndBytesConfig()
        with self.assertRaises(UpperCamelCase__):
            snake_case__ = AutoModelForCausalLM.from_pretrained(
                self.model_name , quantization_config=UpperCamelCase__ , load_in_abit=UpperCamelCase__ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
    def __magic_name__ ( self : List[Any]):
        """Device/dtype casts of a quantized model must raise, while the fp16
        reference model still supports to()/half()/float()."""
        with self.assertRaises(UpperCamelCase__):
            # Tries with `str`
            self.model_abit.to("""cpu""")
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `dtype``
            self.model_abit.to(torch.floataa)
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `device`
            self.model_abit.to(torch.device("""cuda:0"""))
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `device`
            self.model_abit.float()
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `device`
            self.model_abit.half()
        # Test if we did not break anything
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        snake_case__ = self.model_fpaa.to(torch.floataa)
        snake_case__ = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        # Check this does not throw an error
        snake_case__ = self.model_fpaa.to("""cpu""")
        # Check this does not throw an error
        snake_case__ = self.model_fpaa.half()
        # Check this does not throw an error
        snake_case__ = self.model_fpaa.float()
    def __magic_name__ ( self : Dict):
        """Modules kept in fp32 (T5 wo projection) must stay fp32 after 4-bit load."""
        snake_case__ = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCAmelCase ( unittest.TestCase ):
    """4-bit loading tests for T5 variants, with and without the
    `_keep_in_fp32_modules` mechanism.

    NOTE(review): locals are mangled to ``snake_case__`` and several flags to
    the undefined ``UpperCamelCase__`` — confirm against upstream.
    """
    @classmethod
    def __magic_name__ ( cls : Optional[Any]):
        """Class-level fixtures: model names, tokenizer, and prompt."""
        snake_case__ = """t5-small"""
        snake_case__ = """google/flan-t5-small"""  # flan-t5 uses dense-act instead of dense-relu-dense
        snake_case__ = AutoTokenizer.from_pretrained(cls.model_name)
        snake_case__ = """Translate in German: Hello, my dog is cute"""
    def __magic_name__ ( self : Optional[int]):
        """Clear CUDA memory between tests."""
        gc.collect()
        torch.cuda.empty_cache()
    def __magic_name__ ( self : Any):
        """Both T5 flavors should load and generate in 4-bit even when the
        keep-in-fp32 module list is disabled (restored afterwards)."""
        from transformers import TaForConditionalGeneration
        snake_case__ = TaForConditionalGeneration._keep_in_fpaa_modules
        snake_case__ = None
        # test with `t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
        # test with `flan-t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
        snake_case__ = modules
    def __magic_name__ ( self : Union[str, Any]):
        """With keep-in-fp32 active, decoder attention projections should be
        bitsandbytes 4-bit linears and generation should still work."""
        import bitsandbytes as bnb
        from transformers import TaForConditionalGeneration
        # test with `t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit))
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
        # test with `flan-t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
class _lowerCAmelCase ( lowercase_ ):
    """Checks 4-bit loading across model heads (base, sequence classification,
    causal LM, seq2seq): only the lm_head / score heads stay regular params.

    NOTE(review): locals mangled to ``snake_case__`` while tearDown reads
    ``self.base_model`` etc. — presumably ``self.`` assignments; confirm.
    """
    def __magic_name__ ( self : int):
        """Load the four model-head variants in 4-bit."""
        super().setUp()
        # model_name
        snake_case__ = """bigscience/bloom-560m"""
        snake_case__ = """t5-small"""
        # Different types of model
        snake_case__ = AutoModel.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        # Sequence classification model
        snake_case__ = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        # CausalLM model
        snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        # Seq2seq model
        snake_case__ = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
    def __magic_name__ ( self : List[str]):
        """Free all four models and clear the CUDA cache."""
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()
    def __magic_name__ ( self : Union[str, Any]):
        """Internal linears become 4-bit params; output heads stay nn.Parameter."""
        from bitsandbytes.nn import Paramsabit
        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit)
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class _lowerCAmelCase ( lowercase_ ):
    """End-to-end check that the text-generation pipeline works with a 4-bit
    model loaded via model_kwargs.

    NOTE(review): the pipeline local is mangled to ``snake_case__`` while the
    test reads ``self.pipe`` — presumably ``self.pipe = pipeline(...)``.
    """
    def __magic_name__ ( self : Tuple):
        super().setUp()
    def __magic_name__ ( self : int):
        """Free the pipeline and clear the CUDA cache."""
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()
    def __magic_name__ ( self : Tuple):
        """Build a 4-bit pipeline and check the generated text is expected."""
        snake_case__ = pipeline(
            """text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
        # Real second forward pass
        snake_case__ = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class _lowerCAmelCase ( lowercase_ ):
    """Multi-GPU test: a 4-bit model with a balanced device map should be
    sharded over both GPUs and still generate the expected text."""
    def __magic_name__ ( self : Union[str, Any]):
        super().setUp()
    def __magic_name__ ( self : int):
        """Load with device_map='balanced', verify placement, then generate."""
        snake_case__ = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=UpperCamelCase__ , device_map="""balanced""")
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()) , {0, 1})
        # Check that inference pass works on the model
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        # Second real batch
        snake_case__ = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS)
class _lowerCAmelCase ( lowercase_ ):
    """Training test: LoRA adapters attached to a 4-bit OPT model should
    receive gradients while frozen embeddings do not.

    NOTE(review): locals mangled to ``snake_case__`` (e.g. the model load
    should bind ``model``) and several isinstance/repr arguments were replaced
    by the undefined ``UpperCamelCase__`` — confirm against upstream.
    """
    def __magic_name__ ( self : Any):
        """Use the small OPT checkpoint for the training test."""
        snake_case__ = """facebook/opt-350m"""
        super().setUp()
    def __magic_name__ ( self : Any):
        """Freeze the 4-bit model, add LoRA adapters to attention projections,
        run one forward/backward, and check adapter gradients exist."""
        if version.parse(importlib.metadata.version("""bitsandbytes""")) < version.parse("""0.37.0"""):
            return
        # Step 1: freeze all parameters
        snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__)
        self.assertEqual(set(model.hf_device_map.values()) , {torch.cuda.current_device()})
        for param in model.parameters():
            snake_case__ = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                snake_case__ = param.data.to(torch.floataa)
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(UpperCamelCase__)):
                snake_case__ = LoRALayer(module.q_proj , rank=1_6)
                snake_case__ = LoRALayer(module.k_proj , rank=1_6)
                snake_case__ = LoRALayer(module.v_proj , rank=1_6)
        # Step 3: dummy batch
        snake_case__ = self.tokenizer("""Test batch """ , return_tensors="""pt""").to(0)
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            snake_case__ = model.forward(**UpperCamelCase__)
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(UpperCamelCase__ , UpperCamelCase__):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(UpperCamelCase__ , nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class _lowerCAmelCase ( lowercase_ ):
    """Fixture variant parameterized for GPT-2 XL.

    NOTE(review): attribute names were mangled to ``_lowercase`` — presumably
    ``model_name`` and ``EXPECTED_RELATIVE_DIFFERENCE`` originally.
    """
    _lowercase : List[Any] = '''gpt2-xl'''
    # Expected fp16 / 4-bit memory-footprint ratio for gpt2-xl.
    _lowercase : Any = 3.3191_8548_5415_2187
| 654 | 0 |
'''simple docstring'''
import random
def A ( nodes_number: int, probability: float, directed: bool = False ) -> dict:
    """Generate a random graph as an adjacency-list dict.

    Fixes two defects of the original: all three parameters were declared with
    the same name (a SyntaxError), and the `probability >= 1` branch called an
    undefined `complete_graph` — the complete graph is now built inline.

    :param nodes_number: number of nodes, labeled 0..nodes_number-1.
    :param probability: per-pair edge probability; >=1 yields a complete
        graph, <=0 yields an edgeless graph.
    :param directed: if False, every generated edge is mirrored.
    :return: dict mapping each node to its list of neighbors.
    """
    graph: dict = {i: [] for i in range(nodes_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return {i: [j for j in range(nodes_number) if i != j] for i in range(nodes_number)}
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is greater than probability probability
    for i in range(nodes_number):
        for j in range(i + 1, nodes_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge in from j to i, either
                    graph[j].append(i)
    return graph
def A ( UpperCamelCase_ : int ) -> dict:
    """Build the complete graph on `UpperCamelCase_` nodes as an adjacency dict:
    every node is connected to every other node."""
    node_count = UpperCamelCase_
    adjacency = {}
    for node in range(node_count):
        neighbors = [other for other in range(node_count) if other != node]
        adjacency[node] = neighbors
    return adjacency
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 48 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# NOTE(review): all four constants were mangled to `a__`, so each binding
# shadows the previous one; downstream code reads `OUTPUT_DIR` — presumably
# these were LABEL_DIR, IMAGE_DIR, OUTPUT_DIR and FLIP_TYPE originally.
a__ = """"""
a__ = """"""
a__ = """"""
a__ = 1 # (0 is vertical, 1 is horizontal)
def _UpperCAmelCase ( ):
    """Entry point: load the dataset, flip every image and its YOLO boxes, and
    write the augmented JPEGs plus label files.

    NOTE(review): the calls below pass an undefined name `a` and read
    `OUTPUT_DIR` / `paths` / `new_annos` / `file_name` — both locals and
    globals were mangled, so only the visible flow is documented.
    """
    snake_case__ , snake_case__ = get_dataset(a , a )
    print("""Processing...""" )
    snake_case__ , snake_case__ , snake_case__ = update_image_and_anno(a , a , a )
    for index, image in enumerate(a ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        snake_case__ = random_chars(32 )
        snake_case__ = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0]
        snake_case__ = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
        cva.imwrite(F'''/{file_root}.jpg''' , a , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(F'''Success {index+1}/{len(a )} with {file_name}''' )
        snake_case__ = []
        for anno in new_annos[index]:
            # one YOLO line: class x_center y_center width height
            snake_case__ = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
            annos_list.append(a )
        with open(F'''/{file_root}.txt''' , """w""" ) as outfile:
            outfile.write("""\n""".join(line for line in annos_list ) )
def _UpperCAmelCase ( a : str , a : str ):
snake_case__ = []
snake_case__ = []
for label_file in glob.glob(os.path.join(a , """*.txt""" ) ):
snake_case__ = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(a ) as in_file:
snake_case__ = in_file.readlines()
snake_case__ = os.path.join(a , F'''{label_name}.jpg''' )
snake_case__ = []
for obj_list in obj_lists:
snake_case__ = obj_list.rstrip("""\n""" ).split(""" """ )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(a )
labels.append(a )
return img_paths, labels
def _UpperCAmelCase ( img_list: list, anno_list: list, flip_type: int = 1 ):
    """Flip every image and mirror its YOLO boxes accordingly.

    Fixes two defects of the original: all three parameters were declared as
    `a` (a SyntaxError), and `cva.flip` was called with the undefined name
    `a` for both arguments — it now receives the image and the flip code.

    :param img_list: image file paths.
    :param anno_list: per-image lists of [class, x_c, y_c, w, h] boxes.
    :param flip_type: cv2.flip code — 1 flips horizontally (mirrors x_center),
        0 flips vertically (mirrors y_center).
    :return: (flipped images, flipped annotation lists, original paths).
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                # horizontal flip mirrors the x center around 0.5
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                # vertical flip mirrors the y center around 0.5
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def _UpperCAmelCase ( a : int = 32 ):
assert number_char > 1, "The number of character should greater than 1"
snake_case__ = ascii_lowercase + digits
return "".join(random.choice(a ) for _ in range(a ) )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this module — the entry point
    # above was renamed `_UpperCAmelCase` by the mangling; this call raises
    # NameError as written.
    main()
    print("""DONE ✅""")
| 654 | 0 |
"""simple docstring"""
def lowercase__ ( pointa, pointb ):
    """Return the squared Euclidean distance between two 2-D points.

    Fixes the original, which declared both parameters as `snake_case_`
    (a SyntaxError) and subtracted each coordinate from itself, so the
    result was always 0.
    """
    return (pointa[0] - pointb[0]) ** 2 + (pointa[1] - pointb[1]) ** 2
def lowercase__ ( data, column=0 ):
    """Return *data* sorted by the tuple index `column`.

    Fixes the original, which declared both parameters as `snake_case_`
    (a SyntaxError) and whose sort key lambda read an undefined name `x`.
    The second parameter keeps the name `column` because callers pass it
    as a keyword (`column=0` / `column=1`).
    """
    return sorted(data, key=lambda point: point[column])
def lowercase__ ( points, points_counts, min_dis=float("inf") ):
    """Brute-force O(n^2) minimum squared pairwise distance among the first
    `points_counts` points.

    Fixes the original signature, which declared all three parameters as
    `snake_case_` (a SyntaxError); names restored from the body's reads.
    NOTE(review): relies on `euclidean_distance_sqr`, which the mangled
    module does not define under that name — confirm upstream.
    """
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def lowercase__ ( points, points_counts, min_dis=float("inf") ):
    """Minimum squared distance within the dividing strip: each point is
    compared with at most its six predecessors, which is sufficient for
    points sorted by y-coordinate.

    Fixes the original signature, which declared all three parameters as
    `snake_case_` (a SyntaxError).
    NOTE(review): the inner range's upper bound was mangled; `i` matches the
    standard strip scan — confirm upstream.
    """
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def lowercase__ ( points_sorted_on_x, points_sorted_on_y, points_counts ):
    """Divide-and-conquer closest-pair search returning the squared distance.

    Fixes the original signature, which declared all three parameters as
    `snake_case_` (a SyntaxError); names restored from the body's reads.
    NOTE(review): the helpers (`dis_between_closest_pair`,
    `dis_between_closest_in_strip`) and the recursive call are referenced by
    their upstream names, which the mangled module does not define — confirm.
    """
    # base case: brute force on 3 or fewer points
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)
    # recursion on the two halves (split by the median y index)
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)
    # gather points close enough to the dividing line to possibly beat the best
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)
def lowercase__ ( points, points_counts ):
    """Return the closest-pair Euclidean distance (square root of the
    squared-distance search).

    Fixes the original signature, which declared both parameters as
    `snake_case_` (a SyntaxError).
    NOTE(review): relies on `column_based_sort` / `closest_pair_of_points_sqr`
    by their upstream names, which the mangled module does not define.
    """
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
if __name__ == "__main__":
    # NOTE(review): the demo list was renamed `_lowercase` while the call
    # reads `points`, and `closest_pair_of_points` is not defined under that
    # name here — this demo raises NameError as written.
    _lowercase : Optional[Any] = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print('Distance:', closest_pair_of_points(points, len(points)))
| 49 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
# Number of synthetic examples used by the speed benchmark.
a__ = 5_0_0_0_0_0
# NOTE(review): the `a__` bindings shadow each other while the next line
# reads RESULTS_BASEPATH / RESULTS_FILENAME — presumably the original names
# of the os.path.split() results; confirm upstream.
a__ , a__ = os.path.split(__file__)
a__ = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def _UpperCAmelCase ( dataset: "datasets.Dataset", **kwargs ):
    """Run `Dataset.map` with the given keyword arguments; the `@get_duration`
    decorator turns the call into a timing measurement.

    Fixes the original signature, which declared both the positional and the
    keyword-splat parameter as `a` (a SyntaxError).
    """
    _ = dataset.map(**kwargs)
@get_duration
def _UpperCAmelCase ( dataset: "datasets.Dataset", **kwargs ):
    """Run `Dataset.filter` with the given keyword arguments; the
    `@get_duration` decorator turns the call into a timing measurement.

    Fixes the original signature, which declared both the positional and the
    keyword-splat parameter as `a` (a SyntaxError).
    """
    _ = dataset.filter(**kwargs)
def _UpperCAmelCase ( ):
    """Benchmark `Dataset.map` / `Dataset.filter` across output formats and
    dump the timings as JSON.

    NOTE(review): locals were mangled to `snake_case__` and the calls below
    invoke the *builtin* `map`/`filter` with dataset arguments — upstream
    this called the two `@get_duration`-decorated helpers above and stored
    each timing in a `times` dict; confirm against the original script.
    """
    snake_case__ = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        snake_case__ = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
        snake_case__ = generate_example_dataset(
            os.path.join(tmp_dir , """dataset.arrow""" ) , a , num_examples=a )
        snake_case__ = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=a )
        def tokenize(a : Union[str, Any] ):
            return tokenizer(examples["""text"""] )
        snake_case__ = map(a )
        snake_case__ = map(a , batched=a )
        snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""numpy""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""pandas""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        snake_case__ = map(a , function=a , batched=a )
        snake_case__ = filter(a )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(a , """wb""" ) as f:
        f.write(json.dumps(a ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
    # NOTE(review): `benchmark_map_filter` is not defined under that name in
    # this module — the benchmark entry point above was renamed
    # `_UpperCAmelCase` by the mangling; this call raises NameError as written.
    benchmark_map_filter()
| 654 | 0 |
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
# Maps a model-type key to its (config class, LM class, tokenizer class)
# triple, used below to instantiate the teacher and student models.
UpperCamelCase : int = {
    'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
    'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def A__ ( __lowerCAmelCase : str ):
    """Validate mutually-dependent CLI arguments for the distillation run.

    Ensures the MLM/CLM loss weights match the chosen objective, the
    teacher/student architecture pairing is allowed, the referenced files
    exist, and all loss coefficients are non-negative with a positive total.
    Raises AssertionError on any violation.
    """
    # Bug fix: the body referenced `args`, which was never bound; alias the
    # (mangled) parameter name so the checks actually run.
    args = __lowerCAmelCase
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts )
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config )
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights )

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def A__ ( student, args ):
    """Freeze the student's positional embeddings (roberta / gpt2 students).

    Fixes the original signature, which declared both parameters as
    `__lowerCAmelCase` (a SyntaxError); callers pass (student, args).
    NOTE(review): the assignments below were mangled into throwaway locals —
    upstream they set the student's position-embedding weights'
    `requires_grad` to False; left as-is pending confirmation.
    """
    if args.student_type == "roberta":
        lowerCamelCase__ = False
    elif args.student_type == "gpt2":
        lowerCamelCase__ = False
def A__ ( student, args ):
    """Freeze the student's token-type embeddings (roberta students only).

    Fixes the original signature, which declared both parameters as
    `__lowerCAmelCase` (a SyntaxError); callers pass (student, args).
    NOTE(review): the assignment below was mangled into a throwaway local —
    upstream it set the token-type-embedding weights' `requires_grad` to
    False; left as-is pending confirmation.
    """
    if args.student_type == "roberta":
        lowerCamelCase__ = False
def A__ ( ):
    """CLI entry point for knowledge distillation: parse arguments, set up
    GPUs/seed/dump directory, build the tokenizer and dataset, load the
    student and teacher models, and run the `Distiller` training loop.

    NOTE(review): most locals were mangled to `lowerCamelCase__` while later
    lines read the intended names (`parser`, `args`, `tokenizer`, `student`,
    `teacher`, ...); the structure below documents the visible flow only.
    """
    lowerCamelCase__ = argparse.ArgumentParser(description="""Training""" )
    parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" )
    parser.add_argument(
        """--dump_path""" , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""The output directory (log, checkpoints, parameters, etc.)""" )
    parser.add_argument(
        """--data_file""" , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
    parser.add_argument(
        """--student_type""" , type=__lowerCAmelCase , choices=["""distilbert""", """roberta""", """gpt2"""] , required=__lowerCAmelCase , help="""The student type (DistilBERT, RoBERTa).""" , )
    parser.add_argument("""--student_config""" , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""Path to the student configuration.""" )
    parser.add_argument(
        """--student_pretrained_weights""" , default=__lowerCAmelCase , type=__lowerCAmelCase , help="""Load student initialization checkpoint.""" )
    parser.add_argument(
        """--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=__lowerCAmelCase , help="""Teacher type (BERT, RoBERTa).""" )
    parser.add_argument("""--teacher_name""" , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""The teacher model.""" )
    parser.add_argument("""--temperature""" , default=2.0 , type=__lowerCAmelCase , help="""Temperature for the softmax temperature.""" )
    parser.add_argument(
        """--alpha_ce""" , default=0.5 , type=__lowerCAmelCase , help="""Linear weight for the distillation loss. Must be >=0.""" )
    parser.add_argument(
        """--alpha_mlm""" , default=0.0 , type=__lowerCAmelCase , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
    parser.add_argument("""--alpha_clm""" , default=0.5 , type=__lowerCAmelCase , help="""Linear weight for the CLM loss. Must be >=0.""" )
    parser.add_argument("""--alpha_mse""" , default=0.0 , type=__lowerCAmelCase , help="""Linear weight of the MSE loss. Must be >=0.""" )
    parser.add_argument(
        """--alpha_cos""" , default=0.0 , type=__lowerCAmelCase , help="""Linear weight of the cosine embedding loss. Must be >=0.""" )
    parser.add_argument(
        """--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" )
    parser.add_argument(
        """--mlm_mask_prop""" , default=0.15 , type=__lowerCAmelCase , help="""Proportion of tokens for which we need to make a prediction.""" , )
    parser.add_argument("""--word_mask""" , default=0.8 , type=__lowerCAmelCase , help="""Proportion of tokens to mask out.""" )
    parser.add_argument("""--word_keep""" , default=0.1 , type=__lowerCAmelCase , help="""Proportion of tokens to keep.""" )
    parser.add_argument("""--word_rand""" , default=0.1 , type=__lowerCAmelCase , help="""Proportion of tokens to randomly replace.""" )
    parser.add_argument(
        """--mlm_smoothing""" , default=0.7 , type=__lowerCAmelCase , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
    parser.add_argument("""--token_counts""" , type=__lowerCAmelCase , help="""The token counts in the data_file for MLM.""" )
    parser.add_argument(
        """--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
    parser.add_argument(
        """--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
    parser.add_argument(
        """--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
    parser.add_argument("""--n_epoch""" , type=__lowerCAmelCase , default=3 , help="""Number of pass on the whole dataset.""" )
    parser.add_argument("""--batch_size""" , type=__lowerCAmelCase , default=5 , help="""Batch size (for each process).""" )
    parser.add_argument(
        """--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
    parser.add_argument(
        """--gradient_accumulation_steps""" , type=__lowerCAmelCase , default=50 , help="""Gradient accumulation for larger training batches.""" , )
    parser.add_argument("""--warmup_prop""" , default=0.05 , type=__lowerCAmelCase , help="""Linear warmup proportion.""" )
    parser.add_argument("""--weight_decay""" , default=0.0 , type=__lowerCAmelCase , help="""Weight decay if we apply some.""" )
    parser.add_argument("""--learning_rate""" , default=5e-4 , type=__lowerCAmelCase , help="""The initial learning rate for Adam.""" )
    parser.add_argument("""--adam_epsilon""" , default=1e-6 , type=__lowerCAmelCase , help="""Epsilon for Adam optimizer.""" )
    parser.add_argument("""--max_grad_norm""" , default=5.0 , type=__lowerCAmelCase , help="""Max gradient norm.""" )
    parser.add_argument("""--initializer_range""" , default=0.02 , type=__lowerCAmelCase , help="""Random initialization range.""" )
    parser.add_argument(
        """--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
    parser.add_argument(
        """--fp16_opt_level""" , type=__lowerCAmelCase , default="""O1""" , help=(
            """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
            """See details at https://nvidia.github.io/apex/amp.html"""
        ) , )
    parser.add_argument("""--n_gpu""" , type=__lowerCAmelCase , default=1 , help="""Number of GPUs in the node.""" )
    parser.add_argument("""--local_rank""" , type=__lowerCAmelCase , default=-1 , help="""Distributed training - Local rank""" )
    parser.add_argument("""--seed""" , type=__lowerCAmelCase , default=56 , help="""Random seed""" )
    parser.add_argument("""--log_interval""" , type=__lowerCAmelCase , default=500 , help="""Tensorboard logging interval.""" )
    parser.add_argument("""--checkpoint_interval""" , type=__lowerCAmelCase , default=4000 , help="""Checkpoint interval.""" )
    lowerCamelCase__ = parser.parse_args()
    sanity_checks(__lowerCAmelCase )
    # ARGS #
    init_gpu_params(__lowerCAmelCase )
    set_seed(__lowerCAmelCase )
    if args.is_master:
        if os.path.exists(args.dump_path ):
            if not args.force:
                raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite'''
                    """ itUse `--force` if you want to overwrite it""" )
            else:
                shutil.rmtree(args.dump_path )
        if not os.path.exists(args.dump_path ):
            os.makedirs(args.dump_path )
        logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
        # SAVE PARAMS #
        logger.info(F'''Param: {args}''' )
        with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f:
            json.dump(vars(__lowerCAmelCase ) , __lowerCAmelCase , indent=4 )
        git_log(args.dump_path )
    lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = MODEL_CLASSES[args.student_type]
    lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    lowerCamelCase__ = teacher_tokenizer_class.from_pretrained(args.teacher_name )
    lowerCamelCase__ = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        lowerCamelCase__ = tokenizer.all_special_tokens.index(__lowerCAmelCase )
        lowerCamelCase__ = tokenizer.all_special_ids[idx]
    logger.info(F'''Special tokens {special_tok_ids}''' )
    lowerCamelCase__ = special_tok_ids
    lowerCamelCase__ = tokenizer.max_model_input_sizes[args.teacher_name]
    # DATA LOADER #
    logger.info(F'''Loading data from {args.data_file}''' )
    with open(args.data_file , """rb""" ) as fp:
        lowerCamelCase__ = pickle.load(__lowerCAmelCase )
    if args.mlm:
        logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
        with open(args.token_counts , """rb""" ) as fp:
            lowerCamelCase__ = pickle.load(__lowerCAmelCase )
        # MLM smoothing: up-weight rare tokens, zero out special tokens
        lowerCamelCase__ = np.maximum(__lowerCAmelCase , 1 ) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            lowerCamelCase__ = 0.0  # do not predict special tokens
        lowerCamelCase__ = torch.from_numpy(__lowerCAmelCase )
    else:
        lowerCamelCase__ = None
    lowerCamelCase__ = LmSeqsDataset(params=__lowerCAmelCase , data=__lowerCAmelCase )
    logger.info("""Data loader created.""" )
    # STUDENT #
    logger.info(F'''Loading student config from {args.student_config}''' )
    lowerCamelCase__ = student_config_class.from_pretrained(args.student_config )
    lowerCamelCase__ = True
    if args.student_pretrained_weights is not None:
        logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
        lowerCamelCase__ = student_model_class.from_pretrained(args.student_pretrained_weights , config=__lowerCAmelCase )
    else:
        lowerCamelCase__ = student_model_class(__lowerCAmelCase )
    if args.n_gpu > 0:
        student.to(F'''cuda:{args.local_rank}''' )
    logger.info("""Student loaded.""" )
    # TEACHER #
    lowerCamelCase__ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__lowerCAmelCase )
    if args.n_gpu > 0:
        teacher.to(F'''cuda:{args.local_rank}''' )
    logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(__lowerCAmelCase , __lowerCAmelCase )
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(__lowerCAmelCase , __lowerCAmelCase )
    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0 ) == stu_architecture_config.vocab_size
    # DISTILLER #
    torch.cuda.empty_cache()
    lowerCamelCase__ = Distiller(
        params=__lowerCAmelCase , dataset=__lowerCAmelCase , token_probs=__lowerCAmelCase , student=__lowerCAmelCase , teacher=__lowerCAmelCase )
    distiller.train()
    logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined under that name in this module —
    # the entry point above was renamed `A__` by the mangling; this call
    # raises NameError as written.
    main()
| 50 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Build (old_key, new_key) pairs mapping timm DeiT weights to HF DeiT names.

    Args:
        config: DeiTConfig-like object exposing ``num_hidden_layers``.
        base_model: if True, target a headless base model — add pooler keys and
            strip the leading ``deit`` from every destination key.

    Returns:
        list of ``(timm_key, hf_key)`` tuples.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each timm fused qkv projection into separate HF q/k/v tensors.

    Mutates ``state_dict`` in place: pops ``blocks.{i}.attn.qkv.*`` and writes
    ``{prefix}encoder.layer.{i}.attention.attention.{query,key,value}.*``.
    """
    for i in range(config.num_hidden_layers):
        # headless base models have no "deit." prefix on their keys
        prefix = "" if base_model else "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        hidden = config.hidden_size
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[:hidden, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[:hidden]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[hidden : hidden * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[hidden : hidden * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-hidden:, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-hidden:]
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place (raises KeyError if missing)."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO cats image used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True so PIL can read straight from the response's raw file object
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy weights from a timm DeiT checkpoint into a HF DeiT model and save it.

    Args:
        deit_name: timm model name, e.g. ``vit_deit_base_distilled_patch16_224``.
            Patch size and image size are parsed out of this name.
        pytorch_dump_folder_path: output directory for model + image processor.
    """
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass  # DeiTConfig defaults already describe the base variant
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI wrapper: pick the timm checkpoint name and where to dump the
    # converted HF model. (The original bound the parser/args to a throwaway
    # name, so `parser`/`args` below raised NameError.)
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 654 | 0 |
'''simple docstring'''
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of ``nums`` (rotate-head recursion).

    Pops the head, permutes the remainder recursively, appends the head to
    each sub-permutation, then restores ``nums`` before the next rotation.
    """
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        head = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(head)
        result.extend(permutations)
        nums.append(head)  # restore so every element gets a turn as head
    return result
def permutea(nums):
    """Return all permutations of ``nums`` via in-place swap backtracking."""

    def backtrack(start):
        # fix positions [0, start); permute the suffix
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output
if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permutea([1, 2, 3])
    print(res)
    doctest.testmod()
| 51 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """
    Output of `PriorTransformer.forward`.

    Attributes:
        predicted_image_embedding: the predicted CLIP image embedding,
            shape ``(batch_size, embedding_dim)``.
    """

    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin, ConfigMixin):
    """
    A prior transformer: given a diffusion timestep and CLIP text embeddings,
    predicts the corresponding CLIP image embedding (unCLIP-style prior).
    """

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        # fall back to sensible defaults when the optional dims are not given
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            # learned token appended after all other embeddings ("prd" slot)
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        # strictly-upper-triangular -10000 mask => causal attention
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        # mean/std used to (un)normalize CLIP latents; filled from checkpoints
        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        """Return every attention processor in the model, indexed by weight name."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        """Install one processor for all attention layers, or a per-layer dict."""
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Reset every attention layer to the default AttnProcessor."""
        self.set_attn_processor(AttnProcessor())

    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        """Run the prior; returns PriorTransformerOutput (or a tuple if not return_dict)."""
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)
        proj_embeddings = self.embedding_proj(proj_embedding)

        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]
        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(additional_embeds, dim=1)

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            # convert boolean mask to additive -10000 mask, pad for the extra
            # tokens, fold in causality, then tile per attention head
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)
        if self.prd_embedding is not None:
            # the prediction lives in the learned "prd" slot (last token)
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        """Un-normalize predicted latents back to the CLIP embedding scale."""
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 654 | 0 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
# Module-level logger; every helper below reports through `logger`, so it must
# be bound under that name (the previous binding used a throwaway name).
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    """Guess the eval model family from a checkpoint name.

    Checked in priority order; returns "rag_token", "rag_sequence", "bart",
    or None when nothing matches.
    """
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Score ``prediction`` against every reference and return the best value."""
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    """Compute and log EM / F1 of predictions against gold answers.

    ``args.gold_data_mode`` selects the gold layout:
      * "qa"  — TSV rows ``question<TAB>answer_list`` (answer_list is a Python literal)
      * "ans" — one expected answer string per line
    """
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    fa = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        fa += metric_max_over_ground_truths(fa_score, prediction, ground_truths)

    em = 100.0 * em / total
    fa = 100.0 * fa / total

    logger.info(f"F1: {fa:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    """Compute and log precision@k of retrieved provenance.

    Both files hold one example per line; titles are tab-separated within a line.
    """
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    """Retrieve docs for a batch of questions; return tab-joined doc titles per question."""

    def strip_title(title):
        # wiki titles in the index are wrapped in double quotes — drop them
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_eae(args, rag_model, questions):
    """Generate one answer per question (end-to-end eval); returns decoded strings."""
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )
        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],  # block repeated BOS generation
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    """Build the evaluation CLI parser; returns parsed args with ``device`` set."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path",
        default=None,
        type=str,
        help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set",
        default=None,
        type=str,
        required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path",
        default=None,
        type=str,
        required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument(
        "--eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument(
        "--num_beams",
        default=4,
        type=int,
        help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions",
        action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs",
        action="store_true",
        help="If True, prints docs retried while generating.",
    )
    args = parser.parse_args()
    # downstream code reads args.device everywhere
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    """Run RAG/BART evaluation for every selected checkpoint.

    Writes batched predictions to ``args.predictions_path`` and then scores
    them with the metric matching ``args.eval_mode``.
    """
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            # reuse existing predictions unless the user asked to recompute
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                # flush the final partial batch
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    # Script entry point: parse CLI args once, then evaluate.
    args = get_args()
    main(args)
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
a__ = ["""gpt2"""]
a__ = """gpt2"""
if is_tf_available():
class _lowerCAmelCase ( tf.Module ):
"""simple docstring"""
def __init__( self : List[Any] , UpperCamelCase__ : int):
'''simple docstring'''
super().__init__()
snake_case__ = tokenizer
snake_case__ = AutoConfig.from_pretrained(UpperCamelCase__)
snake_case__ = TFGPTaLMHeadModel.from_config(UpperCamelCase__)
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text"""),))
def __magic_name__ ( self : Tuple , UpperCamelCase__ : int):
'''simple docstring'''
snake_case__ = self.tokenizer(UpperCamelCase__)
snake_case__ = tokenized["""input_ids"""].to_tensor()
snake_case__ = tf.cast(input_ids_dense > 0 , tf.intaa)
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
snake_case__ = self.model(input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__)["""logits"""]
return outputs
@require_tf
@require_keras_nlp
class _lowerCAmelCase(unittest.TestCase):
    """Check that the in-graph TFGPTaTokenizer matches the reference
    GPTaTokenizer and survives tf.function compilation, SavedModel export,
    config round-trips and padding."""

    def setUp(self):
        super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        # deliberately awkward inputs: control chars, CJK, accents, rare glyphs
        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123
            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out["input_ids"].numpy().shape[1]
                assert out_length == max_length
# (removed non-Python dataset-join artifact "| 654 | 0 |" that broke parsing)
from __future__ import annotations
import requests
# Post attributes that may be requested through ``wanted_data``; the fetcher
# rejects anything outside this set. (Restored name: the function below
# references ``valid_terms``, but the constant had been renamed away.)
valid_terms = set(
    'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch posts from a subreddit via Reddit's public JSON listing endpoint.

    Args:
        subreddit: subreddit name (the ``r/<subreddit>`` part of the URL).
        limit: number of posts to return.
        age: listing sort order, e.g. ``"new"``, ``"top"``, ``"hot"``.
        wanted_data: optional list of post attributes to extract; every entry
            must belong to ``valid_terms``. When empty, the raw child entries
            of the listing are returned instead.

    Raises:
        ValueError: if any requested attribute is not in ``valid_terms``.
        requests.HTTPError: when Reddit answers 429 (rate limited).
    """
    wanted_data = wanted_data or []
    # Reject unknown attribute names up front; the walrus keeps the joined message.
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        raise ValueError(f"Invalid search term: {invalid_search_terms}")
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},  # Reddit blocks the default UA
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try after some time
    print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
# (removed non-Python dataset-join artifact "| 53 |" that broke parsing)
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    """Scheduler tests for `IPNDMScheduler` (improved pseudo linear multistep).

    Restored from the obfuscated original: every method had been renamed to the
    same identifier (so all but the last were shadowed), both class attributes
    collided, and the base class / `scheduler.ets` assignment targets were lost.
    """

    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler config, updated with any overrides."""
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """save_config / from_pretrained round-trip must produce identical step outputs."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            # Step a second time to exercise the multistep path that uses `ets`.
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # Covered by check_over_configs/check_over_forward; the common-test
        # version does not apply to this scheduler.
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Same round-trip check as check_over_configs, but varying forward kwargs."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        """Run a full 10-step denoising loop twice and return the final sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        """step() must preserve the sample shape at different timesteps."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
# (removed non-Python dataset-join artifact "| 654 | 0 |" that broke parsing)
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
# Restored constant names: the code below references `logger`, `_CONFIG_FOR_DOC`,
# `_CHECKPOINT_FOR_DOC`, `_EXPECTED_OUTPUT_SHAPE`, `_IMAGE_CLASS_CHECKPOINT` and
# `_IMAGE_CLASS_EXPECTED_OUTPUT`, but all of them had been renamed to one
# colliding placeholder.
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Build a map from TF ``MobilenetV1/...`` checkpoint variable prefixes to PyTorch parameters.

    Args:
        model: a MobileNetV1 backbone or an image-classification wrapper around one.
        config: model configuration (kept for signature compatibility; unused here).
        tf_weights: raw TF weights dict (kept for signature compatibility; unused here).

    Returns:
        dict mapping TF variable name to the PyTorch ``Parameter``/buffer it fills.
    """
    tf_to_pt_map = {}

    # Classification wrappers carry the backbone under `mobilenet_va`; duck-type
    # instead of isinstance because the wrapper class is defined later in this file.
    if hasattr(model, "mobilenet_va"):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        # Each TF block maps to a (depthwise, pointwise) pair of PyTorch layers.
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    # Classification head (only present on the wrapper model).
    if hasattr(model, "classifier"):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load a TensorFlow MobilenetV1 checkpoint into the given PyTorch model.

    Args:
        model: target PyTorch model (backbone or classification wrapper).
        config: model configuration, forwarded to the name-map builder.
        tf_checkpoint_path: path to the TF checkpoint.

    Returns:
        The model with its parameters filled from the checkpoint.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            # TF depthwise layout (H, W, in, multiplier) -> PyTorch (in, multiplier, H, W)
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        # Drop the weight plus its optimizer/EMA slot variables so the final
        # "not copied" report only lists genuinely unused tensors.
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features, conv_layer):
    """Zero-pad `features` so that `conv_layer` reproduces TensorFlow "SAME" padding.

    TF pads asymmetrically (extra pixel goes to the bottom/right) depending on the
    input size, stride and kernel size, so the padding must be computed per input.

    Args:
        features: input tensor of shape ``(..., height, width)``.
        conv_layer: the ``nn.Conv2d`` whose stride/kernel determine the padding.

    Returns:
        The padded tensor.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    # Split each total padding between the two sides; the odd pixel goes right/bottom.
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class MobileNetVaConvLayer(nn.Module):
    """Conv2d + optional BatchNorm + optional activation, with TF-style "SAME" padding support.

    (Restored name: later code in this file instantiates `MobileNetVaConvLayer`;
    also fixes the nonexistent `nn.Convad`/`nn.BatchNormad` attributes.)
    """

    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        # With TF padding the conv itself is unpadded; padding is applied
        # dynamically in forward() via apply_tf_padding().
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            # A string selects an activation explicitly; otherwise fall back to
            # the activation configured on the model.
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    """Abstract base handling weight initialization and pretrained-model loading.

    (Restored the `PreTrainedModel`-contract attribute names — `config_class`,
    `load_tf_weights`, `base_model_prefix`, `main_input_name` — which had been
    collapsed into one colliding placeholder.)
    """

    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module) -> None:
        """Initialize weights: normal(0, initializer_range) for conv/linear, (0, 1) for batch norm."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
__lowercase : Union[str, Any] =R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
__lowercase : List[Any] =R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    """MobileNetV1 backbone: a conv stem followed by 13 depthwise-separable blocks.

    (Restored name: the classification head below instantiates `MobileNetVaModel`.)
    """

    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        # Stride pattern of the 13 depthwise blocks (stride 2 halves the resolution).
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # Depthwise 3x3 (groups == channels) followed by a pointwise 1x1.
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        # MobileNetV1 has no attention heads to prune.
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    """MobileNetV1 backbone plus dropout + linear classifier over the pooled features."""

    def __init__(self, config: MobileNetVaConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            # Infer the problem type once from num_labels / label dtype,
            # matching the convention of other HF classification heads.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
# (removed non-Python dataset-join artifact "| 54 |" that broke parsing)
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    """Agent tool that builds a binary segmentation mask of an image for a text label using CLIPSeg.

    (Restored method names `encode`/`forward`/`decode` — the `PipelineTool`
    contract — which had all been collapsed into one colliding placeholder;
    also fixes the nonexistent `np.uinta` dtype.)
    """

    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        """Preprocess the (image, label) pair into model-ready tensors."""
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        """Run CLIPSeg without gradients and return the raw segmentation logits."""
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        """Threshold the logits at 0 and render a black/white PIL mask."""
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
# (removed non-Python dataset-join artifact "| 654 | 0 |" that broke parsing)
from ...configuration_utils import PretrainedConfig
# Map from TAPAS checkpoint name to the URL of its configuration file.
# NOTE(review): the name looks like an obfuscation placeholder — conventionally
# this constant is TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP; confirm before renaming.
# Annotation fixed: the value is a dict of strings, and `Optional` was never
# imported in this section.
SCREAMING_SNAKE_CASE: dict = {
    'google/tapas-base-finetuned-sqa': (
        'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
    ),
    'google/tapas-base-finetuned-wtq': (
        'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
    ),
    'google/tapas-base-finetuned-wikisql-supervised': (
        'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
    ),
    'google/tapas-base-finetuned-tabfact': (
        'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
    ),
}
class TapasConfig(PretrainedConfig):
    """Configuration for TAPAS models.

    BERT-style encoder hyperparameters plus the fine-tuning hyperparameters of
    the TAPAS cell-selection / aggregation heads.

    Restored from the obfuscated original: the `__init__` used the same name for
    every parameter (a SyntaxError), the `model_type` attribute and base class
    name were lost. Parameter names are recovered from the attribute
    assignments, which were left intact.
    """

    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        # JSON serialization stores dict keys as strings; convert them back to ints.
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
# (removed non-Python dataset-join artifact "| 55 |" that broke parsing)
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    """Holds image-processor hyperparameters for the LayoutLMv3 image-processing tests.

    (Restored name: the test class below instantiates
    `LayoutLMvaImageProcessingTester` and calls `prepare_image_processor_dict`.)
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        # Default target size mirrors the 18x18 test resolution.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """Kwargs dict used to instantiate the image processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCAmelCase(lowercase_, unittest.TestCase):
    """Test suite for ``LayoutLMvaImageProcessor``.

    NOTE(review): the base-class name ``lowercase_`` was mangled upstream —
    presumably the shared image-processing test mixin; confirm against the
    file's imports. Local variable names, the ``image_processing_class``
    attribute and the ``test_*`` method names were reconstructed from their
    use sites (the originals were mangled into colliding dummy names).
    """

    # Processor class under test; None skips the suite when pytesseract is missing.
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        # intentionally empty: batched calls are covered by the test_call_* tests below
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True (default)
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
        image = Image.open(ds[0]["""file"""]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", 
        """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
        expected_boxes = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], 
        [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 
        8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 654 | 0 |
"""Lazy-import plumbing for the Nezha model sub-package.

Fixes over the mangled original: the import-structure dict was being
clobbered by the modeling list, ``_import_structure`` was referenced but
never defined (NameError on import), and the ``_LazyModule`` instance was
assigned to a throwaway variable instead of replacing the module in
``sys.modules``.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Maps sub-module name -> public names it provides; consumed by _LazyModule.
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is installed: the modeling objects become importable too
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _lowerCAmelCase(Dataset):
    """Dataset of pre-tokenized sequences for LM distillation.

    Wraps ``data`` (a sequence of token-id arrays) and normalises it:
    over-long sequences are split into chunks, very short sequences and
    unknown-heavy sequences are dropped. Attribute and helper-method names
    were reconstructed from the calls in ``__init__`` (the mangled original
    assigned everything to throwaway locals and named every method
    ``__magic_name__``).
    """

    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Sanity-check that token arrays and recorded lengths agree."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Split sequences longer than ``max_model_input_size`` into chunks,
        re-adding the special start/end tokens on every chunk."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                # max_len - 2 leaves room for the special tokens re-added below
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Drop sequences of 11 tokens or fewer."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Drop sequences in which 50% or more of the tokens are unknown."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Log dataset statistics (master process only)."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Collate ``(token_ids, length)`` pairs, padding to the batch max length."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        # NOTE(review): the astype target was mangled; upstream casts to int — confirm
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 654 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    # NOTE(review): the original assigned "platform" to a throwaway variable; the
    # comment above only makes sense if it is exported as the JAX allocator.
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers import BlenderbotTokenizer
    from transformers.models.blenderbot.modeling_flax_blenderbot import (
        FlaxBlenderbotForConditionalGeneration,
        FlaxBlenderbotModel,
        shift_tokens_right,
    )
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the input kwargs fed to a Flax Blenderbot model in the tests.

    Masks not supplied are derived from the token ids (1 for real tokens,
    0 for padding); head masks default to all-ones (computed for API parity,
    not returned). The mangled original reused one name for every parameter
    (a SyntaxError) and returned the encoder mask under the
    ``decoder_attention_mask`` key, discarding the mask it had just computed
    from ``decoder_input_ids`` — both are fixed here.
    """
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }


# Backward-compatible alias for the mangled original name.
snake_case = prepare_blenderbot_inputs_dict
class FlaxBlenderbotModelTester:
    """Builds tiny Blenderbot configs/inputs and shared KV-cache checks.

    Renamed from the mangled ``_lowerCAmelCase``: the test class below
    instantiates ``FlaxBlenderbotModelTester(self)``. Attribute and method
    names were reconstructed from their use sites.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        # token ids in [3, vocab_size]; last column forced to EOS (id 2)
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            # NOTE(review): the use_cache value was mangled; the cache paths are
            # exercised explicitly via init_cache below — confirm against upstream
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Decoding step-by-step with a KV cache must match one-shot decoding."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as above but with an explicit (zero-extended) attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # pad the mask with zeros up to the cache length
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    """Standalone forward/shift checks on a tiny randomly-initialised model.

    Renamed from the mangled ``_lowerCAmelCase`` to avoid colliding with the
    other test classes in this file; method names reconstructed from the
    ``self._get_config_and_data()`` call sites.
    """

    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        # encoder and decoder sequences of different lengths
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        # shifting right consumes exactly one pad token
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    """Model-level Flax Blenderbot tests driven by ``FlaxBlenderbotModelTester``.

    Class name, base mixins and ``test_*`` method names were reconstructed
    from their use sites (the mangled original used colliding dummy names).
    """

    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")
        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def _UpperCAmelCase ( a : str ):
if "model" in orig_key:
snake_case__ = orig_key.replace("""model.""" , """""" )
if "norm1" in orig_key:
snake_case__ = orig_key.replace("""norm1""" , """attention.output.LayerNorm""" )
if "norm2" in orig_key:
snake_case__ = orig_key.replace("""norm2""" , """output.LayerNorm""" )
if "norm" in orig_key:
snake_case__ = orig_key.replace("""norm""" , """LayerNorm""" )
if "transformer" in orig_key:
snake_case__ = orig_key.split(""".""" )[0].split("""_""" )[-1]
snake_case__ = orig_key.replace(F'''transformer_{layer_num}''' , F'''encoder.layer.{layer_num}''' )
if "mha.attn" in orig_key:
snake_case__ = orig_key.replace("""mha.attn""" , """attention.self""" )
if "mha" in orig_key:
snake_case__ = orig_key.replace("""mha""" , """attention""" )
if "W_q" in orig_key:
snake_case__ = orig_key.replace("""W_q""" , """self.query""" )
if "W_k" in orig_key:
snake_case__ = orig_key.replace("""W_k""" , """self.key""" )
if "W_v" in orig_key:
snake_case__ = orig_key.replace("""W_v""" , """self.value""" )
if "ff1" in orig_key:
snake_case__ = orig_key.replace("""ff1""" , """intermediate.dense""" )
if "ff2" in orig_key:
snake_case__ = orig_key.replace("""ff2""" , """output.dense""" )
if "ff" in orig_key:
snake_case__ = orig_key.replace("""ff""" , """output.dense""" )
if "mlm_class" in orig_key:
snake_case__ = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""" )
if "mlm" in orig_key:
snake_case__ = orig_key.replace("""mlm""" , """cls.predictions.transform""" )
if "cls" not in orig_key:
snake_case__ = """yoso.""" + orig_key
return orig_key
def _UpperCAmelCase ( a : Tuple , a : Dict ):
for key in orig_state_dict.copy().keys():
snake_case__ = orig_state_dict.pop(a )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
snake_case__ = val
snake_case__ = orig_state_dict["""cls.predictions.decoder.bias"""]
snake_case__ = torch.arange(a ).expand((1, -1) ) + 2
return orig_state_dict
def _UpperCAmelCase ( pytorch_model_path , config_file , pytorch_dump_path ):
    """Convert a raw YOSO checkpoint to a HF `YosoForMaskedLM` save directory.

    Fixes: the corrupted original declared three parameters with one shared name
    (a SyntaxError), dropped every local binding, and misspelled "successfully"
    in the final status message.

    Args:
        pytorch_model_path: path to the original .bin/.pt checkpoint.
        config_file: path to the YOSO config JSON.
        pytorch_dump_path: output directory for `save_pretrained`.
    """
    orig_state_dict = torch.load(pytorch_model_path , map_location="""cpu""" )["""model_state_dict"""]
    config = YosoConfig.from_json_file(config_file )
    model = YosoForMaskedLM(config )
    # NOTE(review): `convert_checkpoint_helper` is not defined under that name in this
    # file (the helper above lost its name to obfuscation) — restore the name upstream.
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict )
    # load_state_dict returns the missing/unexpected-key report; print it for inspection.
    print(model.load_state_dict(new_state_dict ) )
    model.eval()
    model.save_pretrained(pytorch_dump_path )
    print(F'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
if __name__ == "__main__":
    # Fixes: `parser` and `args` were never bound in the corrupted original, and the
    # final call targeted `convert_yoso_checkpoint`, which does not exist in this file.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The json file for YOSO model config.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    # The converter above is bound to the (obfuscated) name `_UpperCAmelCase`.
    _UpperCAmelCase(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
"""simple docstring"""
import requests
__lowerCAmelCase : int = '''https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='''
def __lowerCAmelCase ( bbc_news_api_key : str ):
    """Fetch the top BBC News articles from the News API and print their numbered titles.

    Fix: the corrupted original named its parameter `__UpperCamelCase` while the body
    read the undefined names `bbc_news_api_key` and `bbc_news_page`.

    Args:
        bbc_news_api_key: API key appended to the `_NEWS_API` endpoint URL.
    """
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key ).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["""articles"""] , 1 ):
        print(F'{i}.) {article["title"]}' )
if __name__ == "__main__":
    # Fix: the original called `fetch_bbc_news`, which does not exist in this file; the
    # fetch function above is bound to the (obfuscated) name `__lowerCAmelCase`.
    # Called positionally so this works regardless of the parameter's name.
    __lowerCAmelCase('''<Your BBC News API key goes here>''')
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( lowercase_ ):
    """Read-only fsspec filesystem that exposes a single compressed file as its
    uncompressed counterpart (upstream: datasets' BaseCompressedfileFileSystem).

    NOTE(review): this class was machine-obfuscated and is broken as written — the
    four class attributes below all share the name `_lowercase` (upstream:
    `root_marker`, `protocol`, `compression`, `extension`), so only the last
    assignment survives, and several `self.` assignments in `__init__` were lost.
    Confirm intended names against the upstream datasets source.
    """
    _lowercase : Optional[int] = ''''''
    _lowercase : str = (
        None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    _lowercase : str = None # compression type in fsspec. ex: "gzip"
    _lowercase : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
    # NOTE(review): the signature reuses `UpperCamelCase__` for every parameter, which is a
    # SyntaxError; upstream parameters are (fo="", target_protocol=None, target_options=None).
    def __init__( self : List[Any] , UpperCamelCase__ : str = "" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[dict] = None , **UpperCamelCase__ : List[Any]):
        '''Open the compressed file lazily and record its compressed/uncompressed names.'''
        super().__init__(self , **UpperCamelCase__)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        # NOTE(review): upstream binds this to `self.file`; the assignment target was lost,
        # and `target_options` below is undefined under the obfuscated parameter names.
        snake_case__ = fsspec.open(
            UpperCamelCase__ , mode="""rb""" , protocol=UpperCamelCase__ , compression=self.compression , client_kwargs={
                """requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
                """trust_env""": True, # Enable reading proxy env variables.
                **(target_options or {}).pop("""client_kwargs""" , {}), # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        # NOTE(review): upstream `self.compressed_name = ...`
        snake_case__ = os.path.basename(self.file.path.split("""::""")[0])
        # NOTE(review): upstream `self.uncompressed_name = ...` (strips the final extension)
        snake_case__ = (
            self.compressed_name[: self.compressed_name.rindex(""".""")]
            if """.""" in self.compressed_name
            else self.compressed_name
        )
        # NOTE(review): upstream `self.dir_cache = None`
        snake_case__ = None
    @classmethod
    def __magic_name__ ( cls : Union[str, Any] , UpperCamelCase__ : List[Any]):
        '''Strip the protocol prefix; compressed files are addressed as a bare root path.'''
        return super()._strip_protocol(UpperCamelCase__).lstrip("""/""")
    def __magic_name__ ( self : Dict):
        '''Populate the one-entry directory cache (upstream: _get_dirs).'''
        if self.dir_cache is None:
            # NOTE(review): upstream `f = {...}` then `self.dir_cache = {f["name"]: f}`;
            # both assignment targets were lost, so `f` below is undefined as written.
            snake_case__ = {**self.file.fs.info(self.file.path), """name""": self.uncompressed_name}
            snake_case__ = {f["""name"""]: f}
    def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : str):
        '''Return the full decompressed content of the wrapped file (upstream: cat).'''
        return self.file.open().read()
    # NOTE(review): duplicate `UpperCamelCase__` parameters again (SyntaxError); upstream
    # signature is (path, mode="rb", block_size=None, autocommit=True, cache_options=None).
    def __magic_name__ ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : str = "rb" , UpperCamelCase__ : Any=None , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : Optional[Any] , ):
        '''Open the (single) file for binary reading; any other mode is rejected.'''
        snake_case__ = self._strip_protocol(UpperCamelCase__)
        # NOTE(review): `mode` is undefined under the obfuscated parameter names.
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''')
        return self.file.open()
class _lowerCAmelCase ( lowercase_ ):
    """Bzip2 variant of the compressed-file filesystem (protocol "bz2", extension ".bz2").

    NOTE(review): all three attributes share the name `_lowercase` (upstream:
    `protocol`, `compression`, `extension`), so only the last value survives.
    """
    _lowercase : Dict = '''bz2'''
    _lowercase : Dict = '''bz2'''
    _lowercase : Optional[int] = '''.bz2'''
class _lowerCAmelCase ( lowercase_ ):
    """Gzip variant of the compressed-file filesystem (protocol "gzip", extension ".gz").

    NOTE(review): all three attributes share the name `_lowercase` (upstream:
    `protocol`, `compression`, `extension`), so only the last value survives.
    """
    _lowercase : Dict = '''gzip'''
    _lowercase : List[str] = '''gzip'''
    _lowercase : Any = '''.gz'''
class _lowerCAmelCase ( lowercase_ ):
    """LZ4 variant of the compressed-file filesystem (protocol "lz4", extension ".lz4").

    NOTE(review): all three attributes share the name `_lowercase` (upstream:
    `protocol`, `compression`, `extension`), so only the last value survives.
    """
    _lowercase : str = '''lz4'''
    _lowercase : List[Any] = '''lz4'''
    _lowercase : Dict = '''.lz4'''
class _lowerCAmelCase ( lowercase_ ):
    """XZ variant of the compressed-file filesystem (protocol "xz", extension ".xz").

    NOTE(review): all three attributes share the name `_lowercase` (upstream:
    `protocol`, `compression`, `extension`), so only the last value survives.
    """
    _lowercase : Optional[int] = '''xz'''
    _lowercase : Union[str, Any] = '''xz'''
    _lowercase : Optional[int] = '''.xz'''
class _lowerCAmelCase ( lowercase_ ):
    """Zstandard variant of the compressed-file filesystem (protocol "zstd", extension ".zst").

    NOTE(review): the three attributes share the name `_lowercase` (upstream:
    `protocol`, `compression`, `extension`); only the last value survives.
    """
    _lowercase : Optional[int] = '''zstd'''
    _lowercase : Tuple = '''zstd'''
    _lowercase : Union[str, Any] = '''.zst'''
    # NOTE(review): the signature reuses `UpperCamelCase__` for every parameter, which is a
    # SyntaxError; upstream parameters are (fo, mode="rb", target_protocol=None,
    # target_options=None, block_size=DEFAULT_BLOCK_SIZE).
    def __init__( self : str , UpperCamelCase__ : str , UpperCamelCase__ : str = "rb" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[dict] = None , UpperCamelCase__ : int = DEFAULT_BLOCK_SIZE , **UpperCamelCase__ : int , ):
        '''Open the zstd file, then patch its __enter__ to return a close-safe wrapper.'''
        super().__init__(
            fo=UpperCamelCase__ , mode=UpperCamelCase__ , target_protocol=UpperCamelCase__ , target_options=UpperCamelCase__ , block_size=UpperCamelCase__ , **UpperCamelCase__ , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        # out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        # NOTE(review): upstream binds this to a local `_enter` that `fixed_enter` below
        # closes over; the assignment target was lost, so `_enter` is undefined as written.
        snake_case__ = self.file.__enter__
        class _lowerCAmelCase :
            """Proxy that forwards everything to the wrapped file but keeps a writable
            close path (upstream: WrappedFile)."""
            def __init__( self : Tuple , UpperCamelCase__ : str):
                '''Store the wrapped file object.'''
                # NOTE(review): upstream `self._file = file_`; both the target and the
                # parameter name were mangled, so `file_` is undefined as written.
                snake_case__ = file_
            def __enter__( self : List[str]):
                '''Enter the wrapped file's context, but return the proxy itself.'''
                self._file.__enter__()
                return self
            def __exit__( self : Dict , *UpperCamelCase__ : str , **UpperCamelCase__ : Optional[Any]):
                '''Delegate context exit to the wrapped file.'''
                self._file.__exit__(*UpperCamelCase__ , **UpperCamelCase__)
            def __iter__( self : Any):
                '''Iterate the wrapped file.'''
                return iter(self._file)
            def __magic_name__ ( self : List[str]):
                '''Advance the wrapped iterator (upstream: __next__).'''
                return next(self._file)
            def __getattr__( self : Any , UpperCamelCase__ : int):
                '''Forward any other attribute access to the wrapped file.'''
                return getattr(self._file , UpperCamelCase__)
        def fixed_enter(*UpperCamelCase__ : int , **UpperCamelCase__ : int):
            # NOTE(review): `WrappedFile` was renamed to `_lowerCAmelCase` above and
            # `_enter` was never bound, so both names are undefined as written.
            return WrappedFile(_enter(*UpperCamelCase__ , **UpperCamelCase__))
        # NOTE(review): upstream `self.file.__enter__ = fixed_enter`; the target was lost.
        snake_case__ = fixed_enter
def lowerCAmelCase_ ( grid , row , col , visit ) -> int:
    """Count simple paths from (row, col) to the bottom-right cell of `grid`,
    moving up/down/left/right through cells equal to 0 (1 marks an obstacle).

    Fixes: the corrupted original declared four parameters with one shared name
    (a SyntaxError) and recursed via the undefined name `depth_first_search`.

    Args:
        grid: rectangular matrix of 0/1 cells.
        row, col: current position (out-of-range positions contribute 0 paths).
        visit: set of (row, col) cells on the current path; mutated and restored.

    Returns:
        Number of distinct simple paths to the bottom-right cell.
    """
    row_length, col_length = len(grid ), len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col) )

    count = 0
    count += lowerCAmelCase_(grid , row + 1 , col , visit )
    count += lowerCAmelCase_(grid , row - 1 , col , visit )
    count += lowerCAmelCase_(grid , row , col + 1 , visit )
    count += lowerCAmelCase_(grid , row , col - 1 , visit )

    # backtrack so sibling branches may revisit this cell
    visit.remove((row, col) )
    return count
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
def _UpperCAmelCase ( a : int ):
if number < 0:
raise ValueError("""number must not be negative""" )
return number & (number - 1) == 0
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Fix: the corrupted original bound the logger and the archive map to the same name,
# clobbering the logger — while the config class below calls `logger.info(...)`.
logger = logging.get_logger(__name__)
# Map of pretrained config URLs (upstream: CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP).
lowerCAmelCase_ = {
    '''microsoft/conditional-detr-resnet-50''': (
        '''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
    ),
}
class __lowerCAmelCase ( _a ):
    """Configuration for the Conditional DETR model: architecture sizes, backbone
    selection, matcher costs and loss coefficients.

    Fixes applied to the corrupted original: the `__init__` signature reused one
    parameter name for every argument (a SyntaxError), every `self.` assignment
    target was lost, the dict check was written as `isinstance(x, x)`, and the
    `to_dict` body never built its output dict. Parameter names/defaults are
    restored from the upstream ConditionalDetrConfig (order unchanged).

    NOTE(review): the three class attributes below all share the name
    `lowerCamelCase_` (upstream: `model_type`, `keys_to_ignore_at_inference`,
    `attribute_map`), so only the last assignment survives — confirm upstream.
    """
    lowerCamelCase_ : Union[str, Any] = '''conditional_detr'''
    lowerCamelCase_ : List[Any] = ['''past_key_values''']
    lowerCamelCase_ : Union[str, Any] = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ) -> List[Any]:
        # A timm backbone and an explicit backbone config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
            elif isinstance(backbone_config , dict ):
                # was `isinstance(x, x)` in the corrupted original — restored to a dict check
                backbone_model_type = backbone_config.get('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )

    # NOTE(review): the three methods below all share the name `lowerCamelCase`
    # (upstream: `num_attention_heads`, `hidden_size`, `to_dict`); only the last
    # definition is visible on the class — names kept to preserve the interface.
    @property
    def lowerCamelCase (self ) -> int:
        """Alias for ``encoder_attention_heads``."""
        return self.encoder_attention_heads

    @property
    def lowerCamelCase (self ) -> int:
        """Alias for ``d_model``."""
        return self.d_model

    def lowerCamelCase (self ) -> Any:
        """Serialize this configuration to a plain dict (upstream: ``to_dict``)."""
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output['''backbone_config'''] = self.backbone_config.to_dict()
        # NOTE(review): upstream reads the `model_type` class attribute, which in this
        # file was renamed to `lowerCamelCase_` — this lookup relies on the base class.
        output['''model_type'''] = self.__class__.model_type
        return output
class __lowerCAmelCase ( _a ):
    """ONNX export configuration for Conditional DETR (upstream: ConditionalDetrOnnxConfig).

    NOTE(review): the three methods below all share the name `lowerCamelCase`
    (upstream: `inputs`, `atol_for_validation`, `default_onnx_opset`), so only the
    last definition is visible on the class — confirm intended names upstream.
    """
    # Minimum torch-ONNX exporter version this config was written against.
    lowerCamelCase_ : List[str] = version.parse('''1.11''' )
    @property
    def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
        '''Named model inputs with their dynamic (symbolic) axes.'''
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''pixel_mask''', {0: '''batch'''}),
            ] )
    @property
    def lowerCamelCase (self ) -> float:
        '''Absolute tolerance used when validating the exported model outputs.'''
        return 1e-5
    @property
    def lowerCamelCase (self ) -> int:
        '''Default ONNX opset to export with.'''
        return 12
class _lowerCAmelCase :
    """Fenwick-style tree supporting point updates and range maximum queries
    (upstream: a max-Fenwick-tree; `arr` holds raw values, `tree` partial maxima).

    NOTE(review): this class was machine-obfuscated and is broken as written — the
    `self.` assignment targets in `__init__`/`update` were lost, several methods
    share the name `__magic_name__` (upstream: `get_next`, `get_prev`, `update`,
    `query`, so `self.get_prev`/`self.get_next` below are unresolved), and both
    `update` and `query` declare duplicate parameter names (a SyntaxError).
    Confirm the intended bodies against the upstream source before repairing.
    """
    def __init__( self : List[Any] , UpperCamelCase__ : int):
        '''Allocate value and tree arrays of the given size (upstream: self.size/arr/tree).'''
        snake_case__ = size
        snake_case__ = [0] * size
        snake_case__ = [0] * size
    @staticmethod
    def __magic_name__ ( UpperCamelCase__ : int):
        '''Next index covering this one: index | (index + 1). NOTE(review): body reads
        `index` but the parameter is `UpperCamelCase__` — undefined as written.'''
        return index | (index + 1)
    @staticmethod
    def __magic_name__ ( UpperCamelCase__ : int):
        '''Rightmost index of the previous block: (index & (index + 1)) - 1.
        NOTE(review): same parameter/body name mismatch as above.'''
        return (index & (index + 1)) - 1
    def __magic_name__ ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : int):
        '''Point-update `arr[index] = value` and refresh the partial maxima (upstream: update).'''
        snake_case__ = value
        while index < self.size:
            snake_case__ = self.get_prev(UpperCamelCase__) + 1
            if current_left_border == index:
                snake_case__ = value
            else:
                # NOTE(review): the original three-argument max() call cannot be
                # reconstructed from this corrupted line — verify upstream.
                snake_case__ = max(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
            snake_case__ = self.get_next(UpperCamelCase__)
    def __magic_name__ ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int):
        '''Maximum of arr[left:right] via block-wise descent (upstream: query).'''
        right -= 1 # Because of right is exclusive
        snake_case__ = 0
        while left <= right:
            snake_case__ = self.get_prev(UpperCamelCase__)
            if left <= current_left:
                # whole block [current_left+1, right] is inside the range: take its cached max
                snake_case__ = max(UpperCamelCase__ , self.tree[right])
                snake_case__ = current_left
            else:
                # partial block: fall back to the raw value and step left by one
                snake_case__ = max(UpperCamelCase__ , self.arr[right])
                right -= 1
        return result
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
    """Holds the hyper-parameters used by the DETA image-processing tests and computes
    the (height, width) the processor is expected to resize inputs to.

    Fixes applied to the corrupted original: the `__init__` signature reused one
    parameter name for every argument (a SyntaxError), every `self.` assignment was
    lost, the `h, w` unpackings bound both values to a single name, and the sort-key
    lambdas read `item` while declaring a different parameter name.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ) -> List[str]:
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    # NOTE(review): both methods below are named `a` (upstream:
    # `prepare_image_processor_dict` and `get_expected_values`), so the second
    # definition shadows the first — names kept to preserve the obfuscated interface.
    def a ( self ) -> Optional[Any]:
        """Return the kwargs dict used to construct a DetaImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def a ( self , image_inputs , batched=False ) -> Dict:
        """Compute the (height, width) the processor should resize `image_inputs` to,
        honouring the shortest-edge setting; for batches, take the per-axis maxima."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                # PIL reports (width, height)
                width, height = image.size
            else:
                # assumes channels-first arrays: (num_channels, height, width) — TODO confirm
                height, width = image.shape[1], image.shape[2]
            if width < height:
                expected_height = int(self.size["shortest_edge"] * height / width )
                expected_width = self.size["shortest_edge"]
            elif width > height:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * width / height )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                # recurse per image; `self.a` is this same method (upstream: get_expected_values)
                expected_height, expected_width = self.a([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class __lowerCamelCase ( UpperCamelCase__ , unittest.TestCase ):
    """Tests for DetaImageProcessor: attribute presence, PIL/numpy/torch batch encoding
    shapes, and slow integration checks against COCO detection/panoptic annotations.

    NOTE(review): this class was machine-obfuscated and is broken as written — every
    method is named `a` (so earlier definitions are shadowed and unittest discovers no
    `test_*` methods), result bindings were collapsed to throwaway names, and many
    bodies read `SCREAMING_SNAKE_CASE__` / `image_processor` / `image_inputs` etc.
    that are never defined. The base class `UpperCamelCase__` is also undefined here
    (upstream: ImageProcessingSavingTestMixin). Confirm against the upstream test file.
    """
    # upstream: image_processing_class
    snake_case__ = DetaImageProcessor if is_vision_available() else None
    # upstream: setUp — binds self.image_processor_tester (binding lost here)
    def a ( self : str ) -> Tuple:
        lowerCAmelCase__ = DetaImageProcessingTester(self )
    # upstream: image_processor_dict property
    @property
    def a ( self : List[Any] ) -> List[str]:
        return self.image_processor_tester.prepare_image_processor_dict()
    # upstream: test_image_processor_properties
    def a ( self : int ) -> List[str]:
        lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "image_mean" ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "image_std" ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "do_normalize" ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "do_resize" ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "do_rescale" ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "do_pad" ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "size" ) )
    # upstream: test_image_processor_from_dict_with_kwargs
    def a ( self : int ) -> List[Any]:
        lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
        self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE__ )
    # upstream: test_batch_feature (intentionally a no-op)
    def a ( self : Optional[int] ) -> int:
        pass
    # upstream: test_call_pil
    def a ( self : Union[str, Any] ) -> str:
        # Initialize image_processing
        lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
        # Test not batched input
        lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
        lowerCAmelCase__ = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    # upstream: test_call_numpy
    def a ( self : Dict ) -> Optional[int]:
        # Initialize image_processing
        lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
        # Test not batched input
        lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowerCAmelCase__ = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors="pt" ).pixel_values
        lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    # upstream: test_call_pytorch
    def a ( self : Dict ) -> Optional[Any]:
        # Initialize image_processing
        lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
        # Test not batched input
        lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowerCAmelCase__ = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors="pt" ).pixel_values
        lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    # upstream: test_call_pytorch_with_coco_detection_annotations (slow integration test)
    @slow
    def a ( self : Tuple ) -> List[Any]:
        # prepare image and target
        lowerCAmelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            lowerCAmelCase__ = json.loads(f.read() )
        lowerCAmelCase__ = {"image_id": 39_769, "annotations": target}
        # encode them
        lowerCAmelCase__ = DetaImageProcessor()
        lowerCAmelCase__ = image_processing(images=SCREAMING_SNAKE_CASE__ , annotations=SCREAMING_SNAKE_CASE__ , return_tensors="pt" )
        # verify pixel values
        lowerCAmelCase__ = torch.Size([1, 3, 800, 1_066] )
        self.assertEqual(encoding["pixel_values"].shape , SCREAMING_SNAKE_CASE__ )
        lowerCAmelCase__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
        # verify area
        lowerCAmelCase__ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , SCREAMING_SNAKE_CASE__ ) )
        # verify boxes
        lowerCAmelCase__ = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , SCREAMING_SNAKE_CASE__ )
        lowerCAmelCase__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
        # verify image_id
        lowerCAmelCase__ = torch.tensor([39_769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , SCREAMING_SNAKE_CASE__ ) )
        # verify is_crowd
        lowerCAmelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , SCREAMING_SNAKE_CASE__ ) )
        # verify class_labels
        lowerCAmelCase__ = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , SCREAMING_SNAKE_CASE__ ) )
        # verify orig_size
        lowerCAmelCase__ = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , SCREAMING_SNAKE_CASE__ ) )
        # verify size
        lowerCAmelCase__ = torch.tensor([800, 1_066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , SCREAMING_SNAKE_CASE__ ) )
    # upstream: test_call_pytorch_with_coco_panoptic_annotations (slow integration test)
    @slow
    def a ( self : Optional[int] ) -> Optional[Any]:
        # prepare image, target and masks_path
        lowerCAmelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            lowerCAmelCase__ = json.loads(f.read() )
        lowerCAmelCase__ = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
        lowerCAmelCase__ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        lowerCAmelCase__ = DetaImageProcessor(format="coco_panoptic" )
        lowerCAmelCase__ = image_processing(images=SCREAMING_SNAKE_CASE__ , annotations=SCREAMING_SNAKE_CASE__ , masks_path=SCREAMING_SNAKE_CASE__ , return_tensors="pt" )
        # verify pixel values
        lowerCAmelCase__ = torch.Size([1, 3, 800, 1_066] )
        self.assertEqual(encoding["pixel_values"].shape , SCREAMING_SNAKE_CASE__ )
        lowerCAmelCase__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
        # verify area
        lowerCAmelCase__ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , SCREAMING_SNAKE_CASE__ ) )
        # verify boxes
        lowerCAmelCase__ = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , SCREAMING_SNAKE_CASE__ )
        lowerCAmelCase__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
        # verify image_id
        lowerCAmelCase__ = torch.tensor([39_769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , SCREAMING_SNAKE_CASE__ ) )
        # verify is_crowd
        lowerCAmelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , SCREAMING_SNAKE_CASE__ ) )
        # verify class_labels
        lowerCAmelCase__ = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , SCREAMING_SNAKE_CASE__ ) )
        # verify masks
        lowerCAmelCase__ = 822_873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , SCREAMING_SNAKE_CASE__ )
        # verify orig_size
        lowerCAmelCase__ = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , SCREAMING_SNAKE_CASE__ ) )
        # verify size
        lowerCAmelCase__ = torch.tensor([800, 1_066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , SCREAMING_SNAKE_CASE__ ) )
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _lowerCAmelCase :
    """Builds a small random Pegasus config plus matching inputs for the TF model tests.

    Fixes applied to the corrupted original: the `__init__` signature reused one
    parameter name for every argument (a SyntaxError), every `self.` assignment
    was lost, and the three class attributes shared one name while the methods
    read `self.config_cls` / `self.config_updates` — the real attribute names are
    restored so the body can resolve them.
    """
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = '''gelu'''

    def __init__(
        self,
        parent,
        batch_size=1_3,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=9_9,
        hidden_size=3_2,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4_0,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def __magic_name__ ( self ):
        """Build a config and inputs dict (upstream: prepare_config_and_inputs_for_common)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
        # force an EOS token at the end of every sequence
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        # NOTE(review): the helper below this class lost its name to obfuscation
        # (`_UpperCAmelCase`); `prepare_pegasus_inputs_dict` must be restored upstream.
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids)
        return config, inputs_dict

    # NOTE(review): this method reuses the name `__magic_name__`, shadowing the one above
    # (upstream names: prepare_config_and_inputs_for_common and
    # check_pegasus_decoder_model_past_large_inputs); names kept to preserve the interface.
    def __magic_name__ ( self , config , inputs_dict ):
        """Check that decoding with cached past_key_values matches full re-decoding."""
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        head_mask = inputs_dict['''head_mask''']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.int8)  # was `tf.inta` (corrupted)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1)
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1]))
        output_from_no_past = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past , output_from_past , rtol=1E-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the standard keyword inputs for TFPegasus tests.

    Any mask not supplied is derived: attention masks from non-pad token
    positions, head masks as all-ones.

    NOTE(review): the obfuscated original used the single name ``a`` for every
    parameter (a SyntaxError); names are restored from the body's reads and
    from the call site (``prepare_pegasus_inputs_dict(config, input_ids,
    decoder_input_ids)`` above). ``tf.inta`` is not a TensorFlow dtype —
    ``tf.int8`` restores valid casts.
    """
    if attention_mask is None:
        # attend to every non-pad encoder token
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # the first decoder position is always attended; the rest follow the pad mask
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class _lowerCAmelCase(lowercase_, unittest.TestCase):
    """Model-level tests for TFPegasus.

    NOTE(review): the obfuscated original listed ``lowercase_`` twice as a base
    class, which raises "duplicate base class" at class-creation time; one copy
    is dropped here. Two distinct tester mixins were presumably intended —
    confirm against the upstream test file. Likewise, its three methods were
    all named ``__magic_name__`` (so only the last survived) and its class
    attributes were all bound to ``_lowercase``; names are restored to the
    conventional tester attributes and unittest hooks.
    """

    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        """Create the shared model and config testers (bound to self, not a throwaway local)."""
        self.model_tester = TFPegasusModelTester(self)
        # NOTE(review): the original passed an undefined name as config_class;
        # PegasusConfig is the evident intent — confirm it is imported above.
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class _lowerCAmelCase(unittest.TestCase):
    """Slow integration test: batch summarization with google/pegasus-xsum.

    NOTE(review): in the obfuscated original all five methods shared the name
    ``__magic_name__`` and all three class attributes were bound to
    ``_lowercase``; names are restored from how the methods read them
    (``self.src_text``, ``self.expected_text``, ``self.model_name``,
    ``self.tokenizer``, ``self.model``, ``self.translate_src_text``,
    ``self._assert_generated_batch_equal_expected``).
    """

    src_text = [
        ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
        ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
    ]
    expected_text = [
        '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
        ''' reduce the risk of wildfires.''',
        '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = '''google/pegasus-xsum'''

    @cached_property
    def tokenizer(self):
        """Tokenizer for `model_name` (built lazily, cached per instance)."""
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        """Pretrained TF seq2seq model for `model_name` (built lazily, cached)."""
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        """Tokenize `src_text`, generate with beam search, and decode to strings."""
        # NOTE(review): padding / use_cache / skip_special_tokens were passed an
        # undefined local in the obfuscated original; True is the evident intent.
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="""tf""")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
| 654 | 0 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
# The installed NLTK version decides which scoring API and which extra data
# packages are needed below. The obfuscated original bound this value to a
# placeholder name while every reader uses NLTK_VERSION (a NameError at import).
NLTK_VERSION = version.parse(importlib_metadata.version("""nltk"""))
if NLTK_VERSION >= version.Version("""3.6.4"""):
    # NLTK >= 3.6.4 expects pre-tokenized input, so pull in its tokenizer.
    from nltk import word_tokenize
# Metric metadata strings. The obfuscated original bound all three to the same
# placeholder name (so only the last survived) while the Metric class below
# reads _CITATION, _DESCRIPTION and _KWARGS_DESCRIPTION — restored here.
_CITATION = """\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
"""

_DESCRIPTION = """\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
"""

_KWARGS_DESCRIPTION = """
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
'meteor': meteor score.
Examples:
>>> meteor = datasets.load_metric('meteor')
>>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]
>>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results[\"meteor\"], 4))
0.6944
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SCREAMING_SNAKE_CASE(datasets.Metric):
    """METEOR metric wrapper around ``nltk.translate.meteor_score``.

    NOTE(review): the obfuscated original named all three methods ``_A`` (so
    only the last survived) and reused one parameter name in ``_compute`` (a
    SyntaxError). The ``datasets.Metric`` contract requires ``_info``,
    ``_download_and_prepare`` and ``_compute`` — restored accordingly, with
    parameter names recovered from the body (``for ref, pred in zip(...)``).
    """

    def _info(self):
        """Describe inputs, citation and reference URLs for the metric."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        """Fetch the NLTK data packages the scorer needs for this NLTK version."""
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        """Return the mean sentence-level METEOR score over the batch."""
        if NLTK_VERSION >= version.Version("3.6.5"):
            # Newer NLTK requires pre-tokenized hypothesis/reference.
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        return {"meteor": np.mean(scores)}
| 62 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
a__ = logging.get_logger(__name__)
# Vocabulary-file tables for the Jukebox tokenizer. The obfuscated original
# bound all three to the same placeholder (so only the last survived), while
# the class below reads VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP and
# PRETRAINED_LYRIC_TOKENS_SIZES — restored here.
VOCAB_FILES_NAMES = {
    """artists_file""": """artists.json""",
    """lyrics_file""": """lyrics.json""",
    """genres_file""": """genres.json""",
}

PRETRAINED_VOCAB_FILES_MAP = {
    """artists_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
    },
    """genres_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
    },
    """lyrics_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
    },
}

PRETRAINED_LYRIC_TOKENS_SIZES = {
    """jukebox""": 5_1_2,
}
class _lowerCAmelCase ( lowercase_ ):
    """Jukebox-style tokenizer: maps (artist, genres, lyrics) triples to id lists.

    NOTE(review): this block is machine-obfuscated and is NOT runnable as-is.
    Every method reuses a single parameter name (``UpperCamelCase__``) for
    several distinct parameters — a SyntaxError in Python — and binds unrelated
    locals to one placeholder (``snake_case__``), so names such as ``oov``,
    ``artists``, ``genres``, ``lyrics``, ``vocab``, ``output``, ``text``,
    ``inputs`` below are read without a surviving assignment. Comments describe
    the evident intent only; confirm against the upstream tokenizer source
    before de-obfuscating.
    """

    # PreTrainedTokenizer class-level configuration tables. All four targets
    # are obfuscated to `_lowercase`, so only the last assignment would survive.
    _lowercase : str = VOCAB_FILES_NAMES
    _lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    _lowercase : str = PRETRAINED_LYRIC_TOKENS_SIZES
    _lowercase : Any = ['''input_ids''', '''attention_mask''']

    # NOTE(review): duplicate parameter names (SyntaxError) and a mutable list
    # default; parameters are presumably (artists_file, genres_file,
    # lyrics_file, version, max_n_lyric_tokens, n_genres, unk_token).
    def __init__( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int=["v3", "v2", "v2"] , UpperCamelCase__ : List[str]=5_1_2 , UpperCamelCase__ : Union[str, Any]=5 , UpperCamelCase__ : List[Any]="<|endoftext|>" , **UpperCamelCase__ : List[Any] , ):
        """Load the three vocab files (artists/genres/lyrics) and build the
        encoder/decoder lookup tables."""
        # Wrap the unk token so it is treated as a proper special token.
        snake_case__ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else unk_token
        super().__init__(
            unk_token=UpperCamelCase__ , n_genres=UpperCamelCase__ , version=UpperCamelCase__ , max_n_lyric_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
        snake_case__ = version
        snake_case__ = max_n_lyric_tokens
        snake_case__ = n_genres
        # Three JSON vocabularies: artists, genres, lyrics characters.
        with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
            snake_case__ = json.load(UpperCamelCase__)
        with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
            snake_case__ = json.load(UpperCamelCase__)
        with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
            snake_case__ = json.load(UpperCamelCase__)
        # Regex matching characters OUTSIDE the lyrics alphabet (out-of-vocab).
        snake_case__ = R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 7_9:
            snake_case__ = oov.replace(R"""\-'""" , R"""\-+'""")
        snake_case__ = regex.compile(UpperCamelCase__)
        # Inverse lookup tables for decoding ids back to strings.
        snake_case__ = {v: k for k, v in self.artists_encoder.items()}
        snake_case__ = {v: k for k, v in self.genres_encoder.items()}
        snake_case__ = {v: k for k, v in self.lyrics_encoder.items()}

    @property
    def __magic_name__ ( self : List[str]):
        """Total vocabulary size: artists + genres + lyric characters."""
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def __magic_name__ ( self : Union[str, Any]):
        """Return the combined vocabulary.

        NOTE(review): ``dict(a, b, c)`` with three positional dicts is a
        TypeError — a dict-merge was presumably intended."""
        return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder)

    def __magic_name__ ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int):
        """Convert (artists, genres, lyrics) strings to id lists; unknown
        entries map to 0 and genre lists are padded to n_genres with -1."""
        snake_case__ = [self.artists_encoder.get(UpperCamelCase__ , 0) for artist in list_artists]
        for genres in range(len(UpperCamelCase__)):
            snake_case__ = [self.genres_encoder.get(UpperCamelCase__ , 0) for genre in list_genres[genres]]
            snake_case__ = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        # Only the first version's lyrics are character-tokenized; v2 heads get empty lists.
        snake_case__ = [[self.lyrics_encoder.get(UpperCamelCase__ , 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : Optional[int]):
        """Split lyrics into a list of single characters."""
        return list(UpperCamelCase__)

    def __magic_name__ ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , **UpperCamelCase__ : List[str]):
        """Normalize artist/genre names, then character-tokenize the lyrics."""
        snake_case__ , snake_case__ , snake_case__ = self.prepare_for_tokenization(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        snake_case__ = self._tokenize(UpperCamelCase__)
        return artist, genre, lyrics

    def __magic_name__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : bool = False):
        """Per-model-version normalization of artists/genres and cleanup of
        the lyrics (accent stripping, out-of-vocab removal)."""
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                # v3 keeps the raw lower-cased names.
                snake_case__ = artists[idx].lower()
                snake_case__ = [genres[idx].lower()]
            else:
                # v2 normalizes and tags names with a ".v2" suffix.
                snake_case__ = self._normalize(artists[idx]) + """.v2"""
                snake_case__ = [
                    self._normalize(UpperCamelCase__) + """.v2""" for genre in genres[idx].split("""_""")
                ] # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            # v2 lyric alphabet: rebuild the character vocab in place.
            snake_case__ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""")
            snake_case__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
            snake_case__ = {vocab[index]: index + 1 for index in range(len(UpperCamelCase__))}
            snake_case__ = 0
            snake_case__ = len(UpperCamelCase__) + 1
            snake_case__ = self.vocab
            snake_case__ = {v: k for k, v in self.vocab.items()}
            snake_case__ = """"""
        else:
            # v3 alphabet additionally allows '+'.
            snake_case__ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""")
        snake_case__ = self._run_strip_accents(UpperCamelCase__)
        snake_case__ = lyrics.replace("""\\""" , """\n""")
        # Remove out-of-vocab characters; the two empty lists mirror the
        # three-version structure used elsewhere.
        snake_case__ = self.out_of_vocab.sub("""""" , UpperCamelCase__), [], []
        return artists, genres, lyrics

    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : str):
        """Strip accents by NFD-decomposing and dropping combining marks (Mn)."""
        snake_case__ = unicodedata.normalize("""NFD""" , UpperCamelCase__)
        snake_case__ = []
        for char in text:
            snake_case__ = unicodedata.category(UpperCamelCase__)
            if cat == "Mn":
                continue
            output.append(UpperCamelCase__)
        return "".join(UpperCamelCase__)

    def __magic_name__ ( self : List[str] , UpperCamelCase__ : str):
        """Normalize an artist/genre name to lowercase [a-z0-9.] with single
        underscores for any other character run."""
        snake_case__ = (
            [chr(UpperCamelCase__) for i in range(ord("""a""") , ord("""z""") + 1)]
            + [chr(UpperCamelCase__) for i in range(ord("""A""") , ord("""Z""") + 1)]
            + [chr(UpperCamelCase__) for i in range(ord("""0""") , ord("""9""") + 1)]
            + ["""."""]
        )
        snake_case__ = frozenset(UpperCamelCase__)
        snake_case__ = re.compile(R"""_+""")
        snake_case__ = """""".join([c if c in accepted else """_""" for c in text.lower()])
        # Collapse runs of '_' and trim leading/trailing ones.
        snake_case__ = pattern.sub("""_""" , UpperCamelCase__).strip("""_""")
        return text

    def __magic_name__ ( self : List[Any] , UpperCamelCase__ : List[str]):
        """Join lyric tokens back into a space-separated string."""
        return " ".join(UpperCamelCase__)

    def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : bool = False):
        """Convert nested lists to the requested tensor framework
        (TF / PyTorch / JAX / NumPy), optionally prepending a batch axis."""
        if not isinstance(UpperCamelCase__ , UpperCamelCase__):
            snake_case__ = TensorType(UpperCamelCase__)
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    """Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""")
            import tensorflow as tf

            snake_case__ = tf.constant
            snake_case__ = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""")
            import torch

            snake_case__ = torch.tensor
            snake_case__ = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""")
            import jax.numpy as jnp # noqa: F811

            snake_case__ = jnp.array
            snake_case__ = _is_jax
        else:
            snake_case__ = np.asarray
            snake_case__ = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                snake_case__ = [inputs]
            if not is_tensor(UpperCamelCase__):
                snake_case__ = as_tensor(UpperCamelCase__)
        except: # noqa E722
            raise ValueError(
                """Unable to create tensor, you should probably activate truncation and/or padding """
                """with 'padding=True' 'truncation=True' to have batched tensors with the same length.""")
        return inputs

    def __call__( self : str , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any="" , UpperCamelCase__ : Dict="pt"):
        """Tokenize one (artist, genres, lyrics) triple into a BatchEncoding
        with one id sequence per model version."""
        snake_case__ = [0, 0, 0]
        snake_case__ = [artist] * len(self.version)
        snake_case__ = [genres] * len(self.version)
        snake_case__ , snake_case__ , snake_case__ = self.tokenize(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        snake_case__ , snake_case__ , snake_case__ = self._convert_token_to_id(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        # Attention mask placeholder filled with -INFINITY (masked positions).
        snake_case__ = [-INFINITY] * len(full_tokens[-1])
        snake_case__ = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=UpperCamelCase__)
            for i in range(len(self.version))
        ]
        return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks})

    def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None):
        """Write the three encoder tables as JSON into save_directory and
        return the written file paths."""
        if not os.path.isdir(UpperCamelCase__):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        snake_case__ = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""])
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.artists_encoder , ensure_ascii=UpperCamelCase__))
        snake_case__ = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""])
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.genres_encoder , ensure_ascii=UpperCamelCase__))
        snake_case__ = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""])
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.lyrics_encoder , ensure_ascii=UpperCamelCase__))
        return (artists_file, genres_file, lyrics_file)

    def __magic_name__ ( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str]):
        """Decode (artist_id, genre_ids, lyric_ids) back to strings using the
        inverse lookup tables built in __init__."""
        snake_case__ = self.artists_decoder.get(UpperCamelCase__)
        snake_case__ = [self.genres_decoder.get(UpperCamelCase__) for genre in genres_index]
        snake_case__ = [self.lyrics_decoder.get(UpperCamelCase__) for character in lyric_index]
        return artist, genres, lyrics
| 654 | 0 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
a : str = logging.get_logger(__name__)
class a(YolosImageProcessor):
    """Deprecated alias of ``YolosImageProcessor``.

    Emits a ``FutureWarning`` on construction and otherwise defers entirely to
    the new class.

    NOTE(review): the obfuscated original used the same name for ``*args`` and
    ``**kwargs`` (a SyntaxError), passed an argument name where the warning
    category belongs, and inherited from an undefined ``lowercase__``; the base
    is restored from the import above and the deprecation message.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            """The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use YolosImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 63 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class _lowerCAmelCase:
    """Resize images so their shortest edge matches a sampled target length.

    The short side is resized to a value drawn uniformly from the inclusive
    ``short_edge_length`` range and the long side is capped at ``max_size``,
    preserving aspect ratio. uint8 NumPy images go through PIL; anything else
    is assumed to be a torch tensor and is resized with bilinear interpolation.

    NOTE(review): the obfuscated original reused one parameter name for both
    constructor arguments (a SyntaxError) and bound every attribute and local
    to the same placeholder; names are restored from how the attributes are
    read in ``__call__``. ``np.uinta`` (invalid attribute) is fixed to
    ``np.uint8`` and ``align_corners`` is given the evident ``False``.
    """

    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = """bilinear"""
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            # Cap the long side, rescaling both dimensions to keep the ratio.
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)
        return img_augs
class _lowerCAmelCase:
    """Batch image preprocessor for the FRCNN pipeline.

    Resizes each image's shortest edge, normalizes with the configured pixel
    mean/std, pads the batch to a common size, and reports the (y, x) scale
    factors needed to map predictions back to the raw image sizes.

    NOTE(review): the obfuscated original bound every attribute/local to one
    placeholder and defined the pad step as ``__magic_name__`` while calling it
    as ``self.pad`` — names restored from those reads. ``ResizeShortestEdge``
    is not defined under that name in this file (the resize class above was
    obfuscated to ``_lowerCAmelCase``); confirm when de-obfuscating.
    """

    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        """Zero-pad every image to the batch-wide max H/W; return the stacked
        batch and the original (unpadded) sizes."""
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                # Tensors are moved to the device as-is; anything else is
                # decoded via img_tensorize first.
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # (y, x) scale factors from processed size back to the raw size
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _UpperCAmelCase(boxes, scales_yx):
    """Scale boxes (x1, y1, x2, y2) in place by per-row (y, x) factors.

    NOTE(review): the obfuscated original named both parameters ``a`` (a
    SyntaxError); names restored from the body. This function shares its name
    with the clip helper below (an obfuscation artifact — only the later
    definition survives at module scope).
    """
    boxes[:, 0::2] *= scales_yx[:, 1]  # x coordinates scale by the x factor
    boxes[:, 1::2] *= scales_yx[:, 0]  # y coordinates scale by the y factor
    return boxes
def _UpperCAmelCase(tensor, box_size):
    """Clamp boxes (x1, y1, x2, y2) in place into an (h, w) image.

    NOTE(review): the obfuscated original named both parameters ``a`` (a
    SyntaxError) and clamped every column to the same bound; x columns must be
    clamped to the width and y columns to the height.
    """
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
| 654 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
lowercase_ : str = logging.get_logger(__name__)
# Tokenizer configuration tables. The obfuscated original bound all four to
# the same placeholder (so only the last survived), while the tokenizer class
# below reads VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and PRETRAINED_INIT_CONFIGURATION —
# restored here.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
        'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
        'junnyu/roformer_chinese_char_small': (
            'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
        ),
        'junnyu/roformer_chinese_char_base': (
            'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
        ),
        'junnyu/roformer_small_discriminator': (
            'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
        ),
        'junnyu/roformer_small_generator': (
            'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'junnyu/roformer_chinese_small': 1_5_3_6,
    'junnyu/roformer_chinese_base': 1_5_3_6,
    'junnyu/roformer_chinese_char_small': 5_1_2,
    'junnyu/roformer_chinese_char_base': 5_1_2,
    'junnyu/roformer_small_discriminator': 1_2_8,
    'junnyu/roformer_small_generator': 1_2_8,
}

PRETRAINED_INIT_CONFIGURATION = {
    'junnyu/roformer_chinese_small': {'do_lower_case': True},
    'junnyu/roformer_chinese_base': {'do_lower_case': True},
    'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
    'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
    'junnyu/roformer_small_discriminator': {'do_lower_case': True},
    'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class _lowerCamelCase(PreTrainedTokenizerFast):
    """Fast RoFormer tokenizer (backed by HuggingFace *tokenizers*), using a
    custom Jieba pre-tokenizer for Chinese word segmentation.

    NOTE(review): heavy de-obfuscation. The original inherited from an
    undefined name (restored to ``PreTrainedTokenizerFast``, imported above),
    bound all five class attributes to ``__a`` (so only the last survived),
    gave four methods the same name, and reused one parameter name per
    signature (SyntaxErrors). Attribute and method names are restored to the
    ``PreTrainedTokenizerFast`` contract; confirm against the upstream
    RoFormer tokenizer source.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer if the saved state disagrees with the
        # requested lower-casing / accent-stripping behavior.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''', do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type'''))
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # The custom Jieba pre-tokenizer is not picklable; swap in a plain
        # BertPreTokenizer for serialization.
        state = self.__dict__.copy()
        state['''_tokenizer'''].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        # Restore state and re-attach the custom Jieba pre-tokenizer.
        self.__dict__ = d
        vocab = self.__dict__['''_tokenizer'''].get_vocab()
        self.__dict__['''_tokenizer'''].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] (+ B [SEP] for pairs)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0s for the first sequence, 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        # Serialize with the picklable BertPreTokenizer in place of the
        # custom Jieba one.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 64 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
# NOTE(review): conventional archive-map name assumed (nothing in the visible
# chunk reads it); the obfuscated original reused the logger's placeholder.
WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    """Configuration class for a WavLM model.

    Stores architecture hyper-parameters (feature extractor, transformer
    encoder, SpecAugment, codevector quantization, CTC / classification /
    x-vector heads, and optional adapter layers).
    """

    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # Feature extractor (1-D conv stack over raw audio).
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        # Transformer encoder.
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # Overall stride of the conv feature extractor: raw samples per frame.
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 654 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
__UpperCAmelCase = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 65 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    """Unconditional image generation with the score-based SDE-VE model.

    Runs the scheduler's predictor/corrector sampling loop with a U-Net that
    predicts the score of the noisy sample.

    Attributes:
        unet: U-Net used to estimate the score at each noise level.
        scheduler: ScoreSdeVeScheduler driving the sampling loop.
    """

    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        """Generate images.

        Args:
            batch_size: Number of images to generate.
            num_inference_steps: Number of denoising steps.
            generator: Optional torch generator(s) for reproducibility.
            output_type: "pil" for PIL images, anything else for a numpy array.
            return_dict: When False, return a plain tuple instead of
                ImagePipelineOutput.
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # Start from pure noise scaled to the scheduler's initial sigma.
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # The denoised mean of the final step is the actual image estimate.
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
| 654 | 0 |
def least_divisible_repunit(divisor: int) -> int:
    """Return A(divisor): the number of digits of the smallest repunit
    (1, 11, 111, ...) divisible by ``divisor``.

    Returns 0 when no repunit is divisible, i.e. when ``divisor`` shares a
    factor with 10 (Project Euler problem 129).
    """
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        # Append one more '1' digit, tracking only the value mod divisor.
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Return the least n for which A(n) first exceeds ``limit``.

    Since A(n) <= n, the answer must be greater than ``limit``; only odd
    candidates (and implicitly those coprime to 10) can work, so we scan odd
    numbers starting just below the limit.
    """
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
| 66 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast tests for the DeepFloyd IF inpainting super-resolution pipeline."""

    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        # Tiny super-resolution components shared by all IF pipeline tests.
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic dummy inputs for the given device."""
        if str(device).startswith("mps"):
            # mps does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Loose tolerance: float16 round-tripping loses precision.
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 654 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# Token ids of non-speech events (music, applause, ...) that may be suppressed
# during generation; one list for the English-only models, one multilingual.
# fmt: off
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
    1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
    4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
    11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
    17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
    34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
    3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
    7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
    14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
    22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
    42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on
class WhisperConfig(PretrainedConfig):
    """Configuration class for a Whisper speech-to-text model.

    Stores the encoder/decoder transformer hyper-parameters, generation token
    ids, SpecAugment settings, and audio-classification head parameters.
    """

    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ) -> List[Any]:
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for Whisper (seq2seq with optional past KV)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification for the exported model's inputs."""
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            # With cached keys/values the decoder only sees one new token.
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        """Build dummy audio (encoder) and token (decoder) inputs for export."""
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )

        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        # The encoder output is downsampled 2x relative to the mel features.
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list, length: int) -> int:
    """Count reversible numbers of a given digit length (Project Euler 145).

    A number n is reversible when every digit of n + reverse(n) is odd.
    Digits are filled in symmetric pairs from the outside in; ``remainder``
    carries the running carry from already-fixed pairs.

    Args:
        remaining_length: how many digit positions are still unset.
        remainder: carry propagated from the already chosen digit pairs.
        digits: scratch buffer of length ``length`` being filled in place.
        length: total number of digits of the candidate numbers.
    Returns:
        Count of reversible numbers matching the partially fixed digits.
    """
    if remaining_length == 0:
        # Leading/trailing zero would change the digit count of the reverse.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        # Verify every digit of n + reverse(n) is odd, inner pairs outward.
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Middle digit of an odd-length number: it is added to itself, so the
        # incoming carry must already be odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        # The pair sum must be odd, so the partner digit has opposite parity.
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Return how many reversible numbers exist below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
| 654 | 0 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
    import os

    import jax.numpy as jnp
    from jax import jit
    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
    from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model

    # Cap per-process XLA GPU memory so several test processes can share a device.
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8


if is_torch_available():
    import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Create a random int32 token-id array of the given shape.

    Args:
        shape: iterable of dimension sizes.
        vocab_size: ids are drawn uniformly from [0, vocab_size - 1].
        rng: optional ``random.Random`` for reproducibility.
    """
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(shape, rng=None):
    """Create a random 0/1 attention mask of the given shape."""
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    """Shared `generate()` tests for Flax generative models.

    Concrete test classes must set `model_tester` and
    `all_generative_model_classes`.
    """

    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        """Build a small generation input (config, ids, mask, max_length)."""
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        # Greedy generation must match the equivalent PyTorch model.
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            # jit-compiled generation must agree with the eager path.
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    """Integration checks for input validation in Flax `generate()`."""

    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
| 68 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
# Mapping from a plain-English language name to its NLLB-200 / FLORES-200 code
# ("<iso639-3>_<script>", e.g. "eng_Latn").  Consumed by the translation tool
# below to convert human-readable `src_lang`/`tgt_lang` arguments into the
# language codes the NLLB tokenizer expects.
a__ = {
    """Acehnese Arabic""": """ace_Arab""",
    """Acehnese Latin""": """ace_Latn""",
    """Mesopotamian Arabic""": """acm_Arab""",
    """Ta'izzi-Adeni Arabic""": """acq_Arab""",
    """Tunisian Arabic""": """aeb_Arab""",
    """Afrikaans""": """afr_Latn""",
    """South Levantine Arabic""": """ajp_Arab""",
    """Akan""": """aka_Latn""",
    """Amharic""": """amh_Ethi""",
    """North Levantine Arabic""": """apc_Arab""",
    """Modern Standard Arabic""": """arb_Arab""",
    """Modern Standard Arabic Romanized""": """arb_Latn""",
    """Najdi Arabic""": """ars_Arab""",
    """Moroccan Arabic""": """ary_Arab""",
    """Egyptian Arabic""": """arz_Arab""",
    """Assamese""": """asm_Beng""",
    """Asturian""": """ast_Latn""",
    """Awadhi""": """awa_Deva""",
    """Central Aymara""": """ayr_Latn""",
    """South Azerbaijani""": """azb_Arab""",
    """North Azerbaijani""": """azj_Latn""",
    """Bashkir""": """bak_Cyrl""",
    """Bambara""": """bam_Latn""",
    """Balinese""": """ban_Latn""",
    """Belarusian""": """bel_Cyrl""",
    """Bemba""": """bem_Latn""",
    """Bengali""": """ben_Beng""",
    """Bhojpuri""": """bho_Deva""",
    """Banjar Arabic""": """bjn_Arab""",
    """Banjar Latin""": """bjn_Latn""",
    """Standard Tibetan""": """bod_Tibt""",
    """Bosnian""": """bos_Latn""",
    """Buginese""": """bug_Latn""",
    """Bulgarian""": """bul_Cyrl""",
    """Catalan""": """cat_Latn""",
    """Cebuano""": """ceb_Latn""",
    """Czech""": """ces_Latn""",
    """Chokwe""": """cjk_Latn""",
    """Central Kurdish""": """ckb_Arab""",
    """Crimean Tatar""": """crh_Latn""",
    """Welsh""": """cym_Latn""",
    """Danish""": """dan_Latn""",
    """German""": """deu_Latn""",
    """Southwestern Dinka""": """dik_Latn""",
    """Dyula""": """dyu_Latn""",
    """Dzongkha""": """dzo_Tibt""",
    """Greek""": """ell_Grek""",
    """English""": """eng_Latn""",
    """Esperanto""": """epo_Latn""",
    """Estonian""": """est_Latn""",
    """Basque""": """eus_Latn""",
    """Ewe""": """ewe_Latn""",
    """Faroese""": """fao_Latn""",
    """Fijian""": """fij_Latn""",
    """Finnish""": """fin_Latn""",
    """Fon""": """fon_Latn""",
    """French""": """fra_Latn""",
    """Friulian""": """fur_Latn""",
    """Nigerian Fulfulde""": """fuv_Latn""",
    """Scottish Gaelic""": """gla_Latn""",
    """Irish""": """gle_Latn""",
    """Galician""": """glg_Latn""",
    """Guarani""": """grn_Latn""",
    """Gujarati""": """guj_Gujr""",
    """Haitian Creole""": """hat_Latn""",
    """Hausa""": """hau_Latn""",
    """Hebrew""": """heb_Hebr""",
    """Hindi""": """hin_Deva""",
    """Chhattisgarhi""": """hne_Deva""",
    """Croatian""": """hrv_Latn""",
    """Hungarian""": """hun_Latn""",
    """Armenian""": """hye_Armn""",
    """Igbo""": """ibo_Latn""",
    """Ilocano""": """ilo_Latn""",
    """Indonesian""": """ind_Latn""",
    """Icelandic""": """isl_Latn""",
    """Italian""": """ita_Latn""",
    """Javanese""": """jav_Latn""",
    """Japanese""": """jpn_Jpan""",
    """Kabyle""": """kab_Latn""",
    """Jingpho""": """kac_Latn""",
    """Kamba""": """kam_Latn""",
    """Kannada""": """kan_Knda""",
    """Kashmiri Arabic""": """kas_Arab""",
    """Kashmiri Devanagari""": """kas_Deva""",
    """Georgian""": """kat_Geor""",
    """Central Kanuri Arabic""": """knc_Arab""",
    """Central Kanuri Latin""": """knc_Latn""",
    """Kazakh""": """kaz_Cyrl""",
    """Kabiyè""": """kbp_Latn""",
    """Kabuverdianu""": """kea_Latn""",
    """Khmer""": """khm_Khmr""",
    """Kikuyu""": """kik_Latn""",
    """Kinyarwanda""": """kin_Latn""",
    """Kyrgyz""": """kir_Cyrl""",
    """Kimbundu""": """kmb_Latn""",
    """Northern Kurdish""": """kmr_Latn""",
    """Kikongo""": """kon_Latn""",
    """Korean""": """kor_Hang""",
    """Lao""": """lao_Laoo""",
    """Ligurian""": """lij_Latn""",
    """Limburgish""": """lim_Latn""",
    """Lingala""": """lin_Latn""",
    """Lithuanian""": """lit_Latn""",
    """Lombard""": """lmo_Latn""",
    """Latgalian""": """ltg_Latn""",
    """Luxembourgish""": """ltz_Latn""",
    """Luba-Kasai""": """lua_Latn""",
    """Ganda""": """lug_Latn""",
    """Luo""": """luo_Latn""",
    """Mizo""": """lus_Latn""",
    """Standard Latvian""": """lvs_Latn""",
    """Magahi""": """mag_Deva""",
    """Maithili""": """mai_Deva""",
    """Malayalam""": """mal_Mlym""",
    """Marathi""": """mar_Deva""",
    """Minangkabau Arabic """: """min_Arab""",
    """Minangkabau Latin""": """min_Latn""",
    """Macedonian""": """mkd_Cyrl""",
    """Plateau Malagasy""": """plt_Latn""",
    """Maltese""": """mlt_Latn""",
    """Meitei Bengali""": """mni_Beng""",
    """Halh Mongolian""": """khk_Cyrl""",
    """Mossi""": """mos_Latn""",
    """Maori""": """mri_Latn""",
    """Burmese""": """mya_Mymr""",
    """Dutch""": """nld_Latn""",
    """Norwegian Nynorsk""": """nno_Latn""",
    """Norwegian Bokmål""": """nob_Latn""",
    """Nepali""": """npi_Deva""",
    """Northern Sotho""": """nso_Latn""",
    """Nuer""": """nus_Latn""",
    """Nyanja""": """nya_Latn""",
    """Occitan""": """oci_Latn""",
    """West Central Oromo""": """gaz_Latn""",
    """Odia""": """ory_Orya""",
    """Pangasinan""": """pag_Latn""",
    """Eastern Panjabi""": """pan_Guru""",
    """Papiamento""": """pap_Latn""",
    """Western Persian""": """pes_Arab""",
    """Polish""": """pol_Latn""",
    """Portuguese""": """por_Latn""",
    """Dari""": """prs_Arab""",
    """Southern Pashto""": """pbt_Arab""",
    """Ayacucho Quechua""": """quy_Latn""",
    """Romanian""": """ron_Latn""",
    """Rundi""": """run_Latn""",
    """Russian""": """rus_Cyrl""",
    """Sango""": """sag_Latn""",
    """Sanskrit""": """san_Deva""",
    """Santali""": """sat_Olck""",
    """Sicilian""": """scn_Latn""",
    """Shan""": """shn_Mymr""",
    """Sinhala""": """sin_Sinh""",
    """Slovak""": """slk_Latn""",
    """Slovenian""": """slv_Latn""",
    """Samoan""": """smo_Latn""",
    """Shona""": """sna_Latn""",
    """Sindhi""": """snd_Arab""",
    """Somali""": """som_Latn""",
    """Southern Sotho""": """sot_Latn""",
    """Spanish""": """spa_Latn""",
    """Tosk Albanian""": """als_Latn""",
    """Sardinian""": """srd_Latn""",
    """Serbian""": """srp_Cyrl""",
    """Swati""": """ssw_Latn""",
    """Sundanese""": """sun_Latn""",
    """Swedish""": """swe_Latn""",
    """Swahili""": """swh_Latn""",
    """Silesian""": """szl_Latn""",
    """Tamil""": """tam_Taml""",
    """Tatar""": """tat_Cyrl""",
    """Telugu""": """tel_Telu""",
    """Tajik""": """tgk_Cyrl""",
    """Tagalog""": """tgl_Latn""",
    """Thai""": """tha_Thai""",
    """Tigrinya""": """tir_Ethi""",
    """Tamasheq Latin""": """taq_Latn""",
    """Tamasheq Tifinagh""": """taq_Tfng""",
    """Tok Pisin""": """tpi_Latn""",
    """Tswana""": """tsn_Latn""",
    """Tsonga""": """tso_Latn""",
    """Turkmen""": """tuk_Latn""",
    """Tumbuka""": """tum_Latn""",
    """Turkish""": """tur_Latn""",
    """Twi""": """twi_Latn""",
    """Central Atlas Tamazight""": """tzm_Tfng""",
    """Uyghur""": """uig_Arab""",
    """Ukrainian""": """ukr_Cyrl""",
    """Umbundu""": """umb_Latn""",
    """Urdu""": """urd_Arab""",
    """Northern Uzbek""": """uzn_Latn""",
    """Venetian""": """vec_Latn""",
    """Vietnamese""": """vie_Latn""",
    """Waray""": """war_Latn""",
    """Wolof""": """wol_Latn""",
    """Xhosa""": """xho_Latn""",
    """Eastern Yiddish""": """ydd_Hebr""",
    """Yoruba""": """yor_Latn""",
    """Yue Chinese""": """yue_Hant""",
    """Chinese Simplified""": """zho_Hans""",
    """Chinese Traditional""": """zho_Hant""",
    """Standard Malay""": """zsm_Latn""",
    """Zulu""": """zul_Latn""",
}
class _lowerCAmelCase ( PipelineTool ):
    """Tool that translates text between two of the ~200 NLLB-200 languages.

    `src_lang` and `tgt_lang` are plain-English language names (keys of the
    module-level code map `a__`); they are converted to FLORES-200 codes before
    being handed to the NLLB tokenizer.

    NOTE(review): an automated rename had collapsed every class attribute onto
    ``_lowercase`` and every method onto ``__magic_name__`` (so later definitions
    clobbered earlier ones), left the undefined names ``lowercase_`` and
    ``LANGUAGE_CODES`` dangling, and passed the raw outputs object as
    ``skip_special_tokens``.  Restored here to the attribute/method names that
    ``PipelineTool`` actually consumes.
    """

    default_checkpoint = '''facebook/nllb-200-distilled-600M'''
    description = (
        '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
        '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
        '''which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '''
        '''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
    )
    name = '''translator'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    # Plain-English language name -> FLORES-200 code (module-level dict above).
    lang_to_code = a__
    inputs = ['''text''', '''text''', '''text''']
    outputs = ['''text''']

    def encode(self, text, src_lang, tgt_lang):
        """Validate both language names and build tokenized translation inputs.

        Raises ValueError when either language name is not a key of `lang_to_code`.
        """
        if src_lang not in self.lang_to_code:
            raise ValueError(F'''{src_lang} is not a supported language.''')
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F'''{tgt_lang} is not a supported language.''')
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors="""pt""" , src_lang=src_lang , tgt_lang=tgt_lang)

    def forward(self, inputs):
        """Run seq2seq generation on the encoded inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode the first generated sequence back to plain text."""
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True)
| 654 | 0 |
'''Tests for the bitsandbytes 4-bit quantization integration (load_in_4bit).'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __UpperCAmelCase(model: Any) -> Union[str, Any]:
    """Return the first transformer block's MLP input-projection linear layer.

    GPT-2 names this layer ``c_fc``; bloom-style checkpoints name the
    equivalent layer ``dense_ah_to_h``.
    """
    # Fix: the parameter had been renamed to `_UpperCAmelCase` while the body
    # still read the original name `model`, so every call raised NameError.
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h


# The tests below refer to this helper by its descriptive upstream name.
get_some_linear_layer = __UpperCAmelCase
# torch is an optional dependency: only import it (and enable the torch-based
# helpers/classes below) when it is actually installed.
if is_torch_available():
    import torch
    import torch.nn as nn
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    """Wraps a linear module with a small trainable low-rank (LoRA-style) adapter.

    Used by the training test below to check that gradients flow through
    adapters attached to a frozen 4-bit model.

    NOTE(review): automated renaming broke this block — both ``__init__``
    parameters are named ``a_`` (module and rank collapsed onto one name), the
    body reads the undefined name ``module``, and the locals assigned to
    ``__snake_case`` were originally ``self.module`` / ``self.adapter``.
    """
    def __init__( self : Dict , a_ : nn.Module , a_ : int ):
        """Build the wrapper: frozen base module + two-linear bottleneck adapter."""
        super().__init__()
        __snake_case = module
        # Down-projection to the adapter rank, then up-projection back out.
        __snake_case = nn.Sequential(
            nn.Linear(module.in_features , a_ , bias=a_ ) , nn.Linear(a_ , module.out_features , bias=a_ ) , )
        # Small init scale keeps the adapter's initial contribution near zero.
        __snake_case = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
        nn.init.normal_(self.adapter[0].weight , std=a_ )
        nn.init.zeros_(self.adapter[1].weight )
        self.adapter.to(module.weight.device )
    def A ( self : str , a_ : List[Any] , *a_ : int , **a_ : int ):
        """Forward: base module output plus the adapter's residual contribution."""
        return self.module(a_ , *a_ , **a_ ) + self.adapter(a_ )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Shared constants/tokenizer for the 4-bit test suites below.

    NOTE(review): the class attributes were all collapsed onto
    ``__SCREAMING_SNAKE_CASE`` by an automated rename; originally they were
    ``model_name``, ``EXPECTED_RELATIVE_DIFFERENCE``, ``input_text``,
    ``EXPECTED_OUTPUTS`` and ``MAX_NEW_TOKENS`` (``EXPECTED_OUTPUTS`` is still
    read under its real name below, so it is currently dangling).
    """
    # We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    __SCREAMING_SNAKE_CASE = """bigscience/bloom-1b7"""
    # Constant values
    __SCREAMING_SNAKE_CASE = 2.109_6595_5269_2574
    __SCREAMING_SNAKE_CASE = """Hello my name is"""
    __SCREAMING_SNAKE_CASE = set()
    EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""" )
    EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" )
    EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" )
    __SCREAMING_SNAKE_CASE = 10
    def A ( self : List[Any] ):
        """Load the shared tokenizer for the checkpoint under test."""
        __snake_case = AutoTokenizer.from_pretrained(self.model_name )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
    """Core 4-bit quantization tests: memory footprint, dtype of quantized
    linears, generation quality, config round-trip, and unsupported operations.

    NOTE(review): an automated rename collapsed every method onto ``A`` (only
    the last definition would survive at class-creation time) and every local
    onto ``__snake_case``; several names (``a_``, ``config``,
    ``encoded_input`` …) are read without being defined.  Restore from the
    upstream bnb test file before running.
    """
    def A ( self : Optional[int] ):
        """setUp: load the fp16 reference model and its 4-bit counterpart."""
        super().setUp()
        # Models and tokenizer
        __snake_case = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map="auto" )
        __snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a_ , device_map="auto" )
    def A ( self : List[str] ):
        """tearDown: drop both models and release GPU memory."""
        del self.model_fpaa
        del self.model_abit
        gc.collect()
        torch.cuda.empty_cache()
    def A ( self : List[str] ):
        """The quantized model's config must expose a serializable quantization_config."""
        __snake_case = self.model_abit.config
        self.assertTrue(hasattr(a_ , "quantization_config" ) )
        __snake_case = config.to_dict()
        __snake_case = config.to_diff_dict()
        __snake_case = config.to_json_string()
    def A ( self : Optional[Any] ):
        """4-bit model must be ~EXPECTED_RELATIVE_DIFFERENCE x smaller, with Params4bit weights."""
        from bitsandbytes.nn import Paramsabit
        __snake_case = self.model_fpaa.get_memory_footprint()
        __snake_case = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
        __snake_case = get_some_linear_layer(self.model_abit )
        self.assertTrue(linear.weight.__class__ == Paramsabit )
    def A ( self : int ):
        """All quantized linear weights (except kept-in-fp32 modules) are packed uint8."""
        from transformers import TaPreTrainedModel
        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()
        for name, module in self.model_abit.named_modules():
            if isinstance(a_ , torch.nn.Linear ):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta )
    def A ( self : Any ):
        """Greedy generation from the 4-bit model matches one of the expected strings."""
        __snake_case = self.tokenizer(self.input_text , return_tensors="pt" )
        __snake_case = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=a_ ) , self.EXPECTED_OUTPUTS )
    def A ( self : int ):
        """Loading via an explicit BitsAndBytesConfig gives the same generation quality."""
        __snake_case = BitsAndBytesConfig()
        __snake_case = True
        __snake_case = AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=a_ , device_map="auto" )
        __snake_case = self.tokenizer(self.input_text , return_tensors="pt" )
        __snake_case = model_abit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=a_ ) , self.EXPECTED_OUTPUTS )
    def A ( self : Dict ):
        """Saving a 4-bit model is unsupported and must raise."""
        with self.assertRaises(a_ ), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(a_ )
    def A ( self : List[str] ):
        """Passing both a quantization_config and conflicting kwargs must raise."""
        __snake_case = BitsAndBytesConfig()
        with self.assertRaises(a_ ):
            __snake_case = AutoModelForCausalLM.from_pretrained(
                self.model_name , quantization_config=a_ , load_in_abit=a_ , device_map="auto" , bnb_abit_quant_type="nf4" , )
    def A ( self : int ):
        """Device/dtype casts are forbidden on 4-bit models; fp16 model stays castable."""
        with self.assertRaises(a_ ):
            # Tries with `str`
            self.model_abit.to("cpu" )
        with self.assertRaises(a_ ):
            # Tries with a `dtype``
            self.model_abit.to(torch.floataa )
        with self.assertRaises(a_ ):
            # Tries with a `device`
            self.model_abit.to(torch.device("cuda:0" ) )
        with self.assertRaises(a_ ):
            # Tries with a `device`
            self.model_abit.float()
        with self.assertRaises(a_ ):
            # Tries with a `device`
            self.model_abit.half()
        # Test if we did not break anything
        __snake_case = self.tokenizer(self.input_text , return_tensors="pt" )
        __snake_case = self.model_fpaa.to(torch.floataa )
        __snake_case = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        # Check this does not throw an error
        __snake_case = self.model_fpaa.to("cpu" )
        # Check this does not throw an error
        __snake_case = self.model_fpaa.half()
        # Check this does not throw an error
        __snake_case = self.model_fpaa.float()
    def A ( self : Tuple ):
        """Modules listed in _keep_in_fp32_modules (t5 `wo`) stay float32 after 4-bit load."""
        __snake_case = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=a_ , device_map="auto" )
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """T5-specific 4-bit tests: inference must work with and without the
    `_keep_in_fp32_modules` mechanism (t5 keeps `wo` in fp32 by default).

    NOTE(review): method/local names were collapsed by an automated rename
    (``A`` / ``__snake_case``); restore from the upstream test before running.
    """
    @classmethod
    def A ( cls : Optional[int] ):
        """setUpClass: shared checkpoints, tokenizer and prompt."""
        __snake_case = "t5-small"
        __snake_case = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
        __snake_case = AutoTokenizer.from_pretrained(cls.model_name )
        __snake_case = "Translate in German: Hello, my dog is cute"
    def A ( self : List[str] ):
        """tearDown: release GPU memory between tests."""
        gc.collect()
        torch.cuda.empty_cache()
    def A ( self : List[str] ):
        """Inference still works when _keep_in_fp32_modules is disabled (set to None)."""
        from transformers import TaForConditionalGeneration
        __snake_case = TaForConditionalGeneration._keep_in_fpaa_modules
        __snake_case = None
        # test with `t5-small`
        __snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=a_ , device_map="auto" )
        __snake_case = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        __snake_case = model.generate(**a_ )
        # test with `flan-t5-small`
        __snake_case = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=a_ , device_map="auto" )
        __snake_case = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        __snake_case = model.generate(**a_ )
        __snake_case = modules
    def A ( self : Optional[Any] ):
        """Default path: decoder attention projections are 4-bit Linear4bit layers."""
        import bitsandbytes as bnb
        from transformers import TaForConditionalGeneration
        # test with `t5-small`
        __snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=a_ , device_map="auto" )
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
        __snake_case = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        __snake_case = model.generate(**a_ )
        # test with `flan-t5-small`
        __snake_case = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=a_ , device_map="auto" )
        __snake_case = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        __snake_case = model.generate(**a_ )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
    """4-bit loading across model heads: base, sequence-classification,
    causal-LM and seq2seq models; heads must remain regular nn.Parameter.

    NOTE(review): methods collapsed onto ``A`` and locals onto ``__snake_case``
    by an automated rename; originally setUp / tearDown / a params-class test.
    """
    def A ( self : Optional[int] ):
        """setUp: load the same checkpoint under several auto-model classes."""
        super().setUp()
        # model_name
        __snake_case = "bigscience/bloom-560m"
        __snake_case = "t5-small"
        # Different types of model
        __snake_case = AutoModel.from_pretrained(self.model_name , load_in_abit=a_ , device_map="auto" )
        # Sequence classification model
        __snake_case = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=a_ , device_map="auto" )
        # CausalLM model
        __snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a_ , device_map="auto" )
        # Seq2seq model
        __snake_case = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=a_ , device_map="auto" )
    def A ( self : Dict ):
        """tearDown: free all loaded models and GPU memory."""
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()
    def A ( self : int ):
        """Last transformer linear is quantized; every task head stays nn.Parameter."""
        from bitsandbytes.nn import Paramsabit
        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
    """4-bit models must work through the high-level `pipeline()` API.

    NOTE(review): methods collapsed onto ``A`` by an automated rename; the
    tearDown deletes ``self.pipe`` which only exists once the pipeline test ran.
    """
    def A ( self : str ):
        """setUp: defer all loading to the test itself."""
        super().setUp()
    def A ( self : str ):
        """tearDown: drop the pipeline and release GPU memory."""
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()
    def A ( self : Tuple ):
        """Build a text-generation pipeline with load_in_4bit and check its output."""
        __snake_case = pipeline(
            "text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
        # Real second forward pass
        __snake_case = self.pipe(self.input_text )
        self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
    """4-bit loading with `device_map="balanced"` must shard across two GPUs
    and still run inference.

    NOTE(review): methods collapsed onto ``A`` by an automated rename.
    """
    def A ( self : Any ):
        """setUp: defer model loading to the test itself."""
        super().setUp()
    def A ( self : Optional[int] ):
        """Load balanced across GPUs, assert the device map covers {0, 1}, then generate."""
        __snake_case = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=a_ , device_map="balanced" )
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
        # Check that inference pass works on the model
        __snake_case = self.tokenizer(self.input_text , return_tensors="pt" )
        # Second real batch
        __snake_case = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=a_ ) , self.EXPECTED_OUTPUTS )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
    """Training test: a frozen 4-bit model fine-tuned through LoRA adapters must
    produce gradients on the adapter weights only.

    NOTE(review): methods/locals collapsed by an automated rename (``A`` /
    ``__snake_case``); ``LoRALayer`` refers to the adapter wrapper class defined
    earlier in this file (also renamed there).
    """
    def A ( self : int ):
        """setUp: use a small OPT checkpoint for the training smoke test."""
        __snake_case = "facebook/opt-350m"
        super().setUp()
    def A ( self : Dict ):
        """Freeze the 4-bit model, attach LoRA adapters, run one backward pass."""
        if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
            return
        # Step 1: freeze all parameters
        __snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a_ )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
        for param in model.parameters():
            __snake_case = False # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                __snake_case = param.data.to(torch.floataa )
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(a_ ) ):
                __snake_case = LoRALayer(module.q_proj , rank=16 )
                __snake_case = LoRALayer(module.k_proj , rank=16 )
                __snake_case = LoRALayer(module.v_proj , rank=16 )
        # Step 3: dummy batch
        __snake_case = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            __snake_case = model.forward(**a_ )
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(a_ , a_ ):
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(a_ , nn.Embedding ):
                self.assertTrue(module.weight.grad is None )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
    # GPT-2-XL variant of the 4-bit test suite: inherits all checks from the
    # bloom suite above, overriding the checkpoint and the expected fp16/4-bit
    # memory ratio.  NOTE(review): both constants were collapsed onto one name
    # by an automated rename; originally `model_name` and
    # `EXPECTED_RELATIVE_DIFFERENCE`.
    __SCREAMING_SNAKE_CASE = """gpt2-xl"""
    __SCREAMING_SNAKE_CASE = 3.3191_8548_5415_2187
| 69 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _UpperCAmelCase(model):
    """Return the first transformer block's MLP input-projection linear layer.

    GPT-2 names this layer ``c_fc``; bloom-style checkpoints name the
    equivalent layer ``dense_ah_to_h``.
    """
    # Fix: the parameter had been renamed to `a` while the body still read the
    # original name `model`, so every call raised NameError.
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h


# The tests below refer to this helper by its descriptive upstream name.
get_some_linear_layer = _UpperCAmelCase
# torch is an optional dependency: only import it (and enable the torch-based
# helpers/classes below) when it is actually installed.
if is_torch_available():
    import torch
    import torch.nn as nn
class _lowerCAmelCase ( nn.Module ):
    """Wraps a linear module with a small trainable low-rank (LoRA-style) adapter.

    NOTE(review): automated renaming broke this block — both ``__init__``
    parameters are named ``UpperCamelCase__`` (module and rank collapsed), the
    body reads the undefined name ``module``, and the locals assigned to
    ``snake_case__`` were originally ``self.module`` / ``self.adapter``.
    """
    def __init__( self : int , UpperCamelCase__ : nn.Module , UpperCamelCase__ : int):
        """Build the wrapper: frozen base module + two-linear bottleneck adapter."""
        super().__init__()
        snake_case__ = module
        # Down-projection to the adapter rank, then up-projection back out.
        snake_case__ = nn.Sequential(
            nn.Linear(module.in_features , UpperCamelCase__ , bias=UpperCamelCase__) , nn.Linear(UpperCamelCase__ , module.out_features , bias=UpperCamelCase__) , )
        # Small init scale keeps the adapter's initial contribution near zero.
        snake_case__ = (2.0 / (5 * min(module.in_features , module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight , std=UpperCamelCase__)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)
    def __magic_name__ ( self : Tuple , UpperCamelCase__ : int , *UpperCamelCase__ : Dict , **UpperCamelCase__ : str):
        """Forward: base module output plus the adapter's residual contribution."""
        return self.module(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__) + self.adapter(UpperCamelCase__)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCAmelCase ( unittest.TestCase ):
    """Shared constants/tokenizer for the 4-bit test suites below.

    NOTE(review): the class attributes were all collapsed onto ``_lowercase``
    by an automated rename; originally ``model_name``,
    ``EXPECTED_RELATIVE_DIFFERENCE``, ``input_text``, ``EXPECTED_OUTPUTS`` and
    ``MAX_NEW_TOKENS`` (``EXPECTED_OUTPUTS`` is still read under its real name
    below, so it is currently dangling).
    """
    _lowercase : Dict = '''bigscience/bloom-1b7'''
    # Constant values
    _lowercase : Any = 2.109_6595_5269_2574
    _lowercase : Tuple = '''Hello my name is'''
    _lowercase : List[Any] = set()
    EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
    EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
    EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
    _lowercase : List[str] = 10
    def __magic_name__ ( self : Optional[int]):
        '''Load the shared tokenizer for the checkpoint under test.'''
        snake_case__ = AutoTokenizer.from_pretrained(self.model_name)
class _lowerCAmelCase ( lowercase_ ):
    """Core 4-bit quantization tests: memory footprint, dtype of quantized
    linears, generation quality, config round-trip, and unsupported operations.

    NOTE(review): the base class name ``lowercase_`` is undefined in this file
    (should be the base-constants test class above); methods were collapsed onto
    ``__magic_name__`` (only the last definition survives class creation) and
    locals onto ``snake_case__``, with several names (``UpperCamelCase__``,
    ``config``, ``encoded_input`` …) read without being defined.
    """
    def __magic_name__ ( self : str):
        '''setUp: load the fp16 reference model and its 4-bit counterpart.'''
        super().setUp()
        # Models and tokenizer
        snake_case__ = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map="""auto""")
        snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
    def __magic_name__ ( self : Tuple):
        '''tearDown: drop both models and release GPU memory.'''
        del self.model_fpaa
        del self.model_abit
        gc.collect()
        torch.cuda.empty_cache()
    def __magic_name__ ( self : str):
        '''The quantized model's config must expose a serializable quantization_config.'''
        snake_case__ = self.model_abit.config
        self.assertTrue(hasattr(UpperCamelCase__ , """quantization_config"""))
        snake_case__ = config.to_dict()
        snake_case__ = config.to_diff_dict()
        snake_case__ = config.to_json_string()
    def __magic_name__ ( self : Dict):
        '''4-bit model must be ~EXPECTED_RELATIVE_DIFFERENCE x smaller, with Params4bit weights.'''
        from bitsandbytes.nn import Paramsabit
        snake_case__ = self.model_fpaa.get_memory_footprint()
        snake_case__ = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE)
        snake_case__ = get_some_linear_layer(self.model_abit)
        self.assertTrue(linear.weight.__class__ == Paramsabit)
    def __magic_name__ ( self : Optional[int]):
        '''All quantized linear weights (except kept-in-fp32 modules) are packed uint8.'''
        from transformers import TaPreTrainedModel
        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()
        for name, module in self.model_abit.named_modules():
            if isinstance(UpperCamelCase__ , torch.nn.Linear):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta)
    def __magic_name__ ( self : Dict):
        '''Greedy generation from the 4-bit model matches one of the expected strings.'''
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        snake_case__ = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS)
    def __magic_name__ ( self : str):
        '''Loading via an explicit BitsAndBytesConfig gives the same generation quality.'''
        snake_case__ = BitsAndBytesConfig()
        snake_case__ = True
        snake_case__ = AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=UpperCamelCase__ , device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        snake_case__ = model_abit_from_config.generate(
            input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS)
    def __magic_name__ ( self : Optional[int]):
        '''Saving a 4-bit model is unsupported and must raise.'''
        with self.assertRaises(UpperCamelCase__), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(UpperCamelCase__)
    def __magic_name__ ( self : List[str]):
        '''Passing both a quantization_config and conflicting kwargs must raise.'''
        snake_case__ = BitsAndBytesConfig()
        with self.assertRaises(UpperCamelCase__):
            snake_case__ = AutoModelForCausalLM.from_pretrained(
                self.model_name , quantization_config=UpperCamelCase__ , load_in_abit=UpperCamelCase__ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
    def __magic_name__ ( self : List[Any]):
        '''Device/dtype casts are forbidden on 4-bit models; fp16 model stays castable.'''
        with self.assertRaises(UpperCamelCase__):
            # Tries with `str`
            self.model_abit.to("""cpu""")
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `dtype``
            self.model_abit.to(torch.floataa)
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `device`
            self.model_abit.to(torch.device("""cuda:0"""))
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `device`
            self.model_abit.float()
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `device`
            self.model_abit.half()
        # Test if we did not break anything
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        snake_case__ = self.model_fpaa.to(torch.floataa)
        snake_case__ = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        # Check this does not throw an error
        snake_case__ = self.model_fpaa.to("""cpu""")
        # Check this does not throw an error
        snake_case__ = self.model_fpaa.half()
        # Check this does not throw an error
        snake_case__ = self.model_fpaa.float()
    def __magic_name__ ( self : Dict):
        '''Modules listed in _keep_in_fp32_modules (t5 `wo`) stay float32 after 4-bit load.'''
        snake_case__ = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCAmelCase ( unittest.TestCase ):
    """T5-specific 4-bit tests: inference must work with and without the
    `_keep_in_fp32_modules` mechanism (t5 keeps `wo` in fp32 by default).

    NOTE(review): method/local names were collapsed by an automated rename
    (``__magic_name__`` / ``snake_case__``); restore from the upstream test
    before running.
    """
    @classmethod
    def __magic_name__ ( cls : Optional[Any]):
        '''setUpClass: shared checkpoints, tokenizer and prompt.'''
        snake_case__ = """t5-small"""
        snake_case__ = """google/flan-t5-small"""  # flan-t5 uses dense-act instead of dense-relu-dense
        snake_case__ = AutoTokenizer.from_pretrained(cls.model_name)
        snake_case__ = """Translate in German: Hello, my dog is cute"""
    def __magic_name__ ( self : Optional[int]):
        '''tearDown: release GPU memory between tests.'''
        gc.collect()
        torch.cuda.empty_cache()
    def __magic_name__ ( self : Any):
        '''Inference still works when _keep_in_fp32_modules is disabled (set to None).'''
        from transformers import TaForConditionalGeneration
        snake_case__ = TaForConditionalGeneration._keep_in_fpaa_modules
        snake_case__ = None
        # test with `t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
        # test with `flan-t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
        snake_case__ = modules
    def __magic_name__ ( self : Union[str, Any]):
        '''Default path: decoder attention projections are 4-bit Linear4bit layers.'''
        import bitsandbytes as bnb
        from transformers import TaForConditionalGeneration
        # test with `t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit))
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
        # test with `flan-t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
class _lowerCAmelCase ( lowercase_ ):
    """4-bit loading across model heads: base, sequence-classification,
    causal-LM and seq2seq models; heads must remain regular nn.Parameter.

    NOTE(review): base class ``lowercase_`` is undefined in this file; methods
    collapsed onto ``__magic_name__`` and locals onto ``snake_case__`` by an
    automated rename.
    """
    def __magic_name__ ( self : int):
        '''setUp: load the same checkpoint under several auto-model classes.'''
        super().setUp()
        # model_name
        snake_case__ = """bigscience/bloom-560m"""
        snake_case__ = """t5-small"""
        # Different types of model
        snake_case__ = AutoModel.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        # Sequence classification model
        snake_case__ = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        # CausalLM model
        snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
        # Seq2seq model
        snake_case__ = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
    def __magic_name__ ( self : List[str]):
        '''tearDown: free all loaded models and GPU memory.'''
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()
    def __magic_name__ ( self : Union[str, Any]):
        '''Last transformer linear is quantized; every task head stays nn.Parameter.'''
        from bitsandbytes.nn import Paramsabit
        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit)
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class _lowerCAmelCase ( lowercase_ ):
    """Runs a 4-bit quantized model through the text-generation pipeline.

    NOTE(review): mangled — the pipeline built in the test is bound to the
    local ``snake_case__`` but read back as ``self.pipe`` / ``pipeline_output``,
    and all methods share the name ``__magic_name__`` (only the last survives).
    Confirm against the upstream test before relying on this class.
    """
    def __magic_name__ ( self : Tuple):
        '''No extra fixtures beyond the parent setUp.'''
        super().setUp()
    def __magic_name__ ( self : int):
        '''Drop the pipeline and release cached GPU memory.'''
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()
    def __magic_name__ ( self : Tuple):
        '''Build a 4-bit float32 pipeline and compare generation with known outputs.'''
        snake_case__ = pipeline(
            """text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
        # Real second forward pass
        snake_case__ = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class _lowerCAmelCase ( lowercase_ ):
    """Checks 4-bit loading with a multi-GPU ``balanced`` device map.

    NOTE(review): mangled — the loaded model is bound to the local
    ``snake_case__`` but later read as ``model_parallel`` / ``encoded_input`` /
    ``output_parallel``, and ``UpperCamelCase__`` is undefined (upstream passes
    ``True`` for load_in_4bit / skip_special_tokens). Confirm against the
    upstream bitsandbytes test before use.
    """
    def __magic_name__ ( self : Union[str, Any]):
        '''No extra fixtures beyond the parent setUp.'''
        super().setUp()
    def __magic_name__ ( self : int):
        '''Load with device_map="balanced" and generate across both GPUs.'''
        snake_case__ = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=UpperCamelCase__ , device_map="""balanced""")
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()) , {0, 1})
        # Check that inference pass works on the model
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        # Second real batch
        snake_case__ = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS)
class _lowerCAmelCase ( lowercase_ ):
    """Smoke-tests LoRA adapter training on top of a 4-bit quantized OPT model.

    NOTE(review): mangled — locals are repeatedly bound to ``snake_case__`` so
    the loaded ``model`` and forward output ``out`` are undefined at their use
    sites; ``UpperCamelCase__`` is undefined (upstream: ``load_in_4bit=True``,
    ``repr(type(module))`` and ``isinstance(module, LoRALayer)``); adapter
    assignments never reach ``module.q_proj`` etc. Reconstruct from the
    upstream bnb training test before running.
    """
    def __magic_name__ ( self : Any):
        '''Select the OPT-350m checkpoint for the training test.'''
        snake_case__ = """facebook/opt-350m"""
        super().setUp()
    def __magic_name__ ( self : Any):
        '''Freeze the 4-bit model, attach LoRA adapters, and check adapter grads flow.'''
        if version.parse(importlib.metadata.version("""bitsandbytes""")) < version.parse("""0.37.0"""):
            return
        # Step 1: freeze all parameters
        snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__)
        self.assertEqual(set(model.hf_device_map.values()) , {torch.cuda.current_device()})
        for param in model.parameters():
            snake_case__ = False # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                snake_case__ = param.data.to(torch.floataa)
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(UpperCamelCase__)):
                snake_case__ = LoRALayer(module.q_proj , rank=1_6)
                snake_case__ = LoRALayer(module.k_proj , rank=1_6)
                snake_case__ = LoRALayer(module.v_proj , rank=1_6)
        # Step 3: dummy batch
        snake_case__ = self.tokenizer("""Test batch """ , return_tensors="""pt""").to(0)
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            snake_case__ = model.forward(**UpperCamelCase__)
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(UpperCamelCase__ , UpperCamelCase__):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(UpperCamelCase__ , nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class _lowerCAmelCase ( lowercase_ ):
    """Parametrization of the parent test case for the gpt2-xl checkpoint.

    NOTE(review): both class attributes are bound to the same mangled name
    ``_lowercase`` — the second silently overwrites the first. Upstream these
    are two distinct attributes (the model id and an expected-value threshold).
    """
    # checkpoint under test
    _lowercase : List[Any] = '''gpt2-xl'''
    # expected metric threshold for this checkpoint (value preserved as-is)
    _lowercase : Any = 3.3191_8548_5415_2187
| 654 | 0 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A( unittest.TestCase ):
    """Fast (dummy-model) checks for the ScoreSdeVe pipeline.

    NOTE(review): mangled — the property is named ``a__`` but the test reads
    ``self.dummy_uncond_unet``; locals are bound to ``lowerCamelCase_`` while
    later lines read ``model`` / ``sde_ve`` / ``image`` / ``image_from_tuple`` /
    ``image_slice`` / ``expected_slice``; ``A_`` (device, disable flag,
    generator, return_dict) is undefined. Confirm against the upstream
    diffusers ScoreSdeVe test before running.
    """
    @property
    def a__ ( self : Dict ) -> int:
        """Small deterministic UNet used as the pipeline's model."""
        torch.manual_seed(0 )
        lowerCamelCase_ = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model
    def a__ ( self : Optional[Any] ) -> List[Any]:
        """Run the pipeline twice (dict and tuple return) and compare output slices."""
        lowerCamelCase_ = self.dummy_uncond_unet
        lowerCamelCase_ = ScoreSdeVeScheduler()
        lowerCamelCase_ = ScoreSdeVePipeline(unet=A_ , scheduler=A_ )
        sde_ve.to(A_ )
        sde_ve.set_progress_bar_config(disable=A_ )
        lowerCamelCase_ = torch.manual_seed(0 )
        lowerCamelCase_ = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=A_ ).images
        lowerCamelCase_ = torch.manual_seed(0 )
        lowerCamelCase_ = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=A_ , return_dict=A_ )[
            0
        ]
        lowerCamelCase_ = image[0, -3:, -3:, -1]
        lowerCamelCase_ = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        lowerCamelCase_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A( unittest.TestCase ):
    """Slow integration check: full ScoreSdeVe pipeline from a pretrained checkpoint.

    NOTE(review): mangled — locals are bound to ``lowerCamelCase_`` while later
    lines read ``model_id`` / ``sde_ve`` / ``image`` / ``image_slice`` /
    ``expected_slice``, and ``A_`` is undefined. Confirm against the upstream
    diffusers test before running.
    """
    def a__ ( self : str ) -> List[Any]:
        """Generate one 256x256 sample and compare a corner slice to known values."""
        lowerCamelCase_ = 'google/ncsnpp-church-256'
        lowerCamelCase_ = UNetaDModel.from_pretrained(A_ )
        lowerCamelCase_ = ScoreSdeVeScheduler.from_pretrained(A_ )
        lowerCamelCase_ = ScoreSdeVePipeline(unet=A_ , scheduler=A_ )
        sde_ve.to(A_ )
        sde_ve.set_progress_bar_config(disable=A_ )
        lowerCamelCase_ = torch.manual_seed(0 )
        lowerCamelCase_ = sde_ve(num_inference_steps=10 , output_type='numpy' , generator=A_ ).images
        lowerCamelCase_ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        lowerCamelCase_ = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 70 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# Flip-augmentation script configuration.
# NOTE(review): all four constants are bound to the same mangled name ``a__``,
# so only the last assignment (the flip type) survives — upstream these are
# distinct names (label dir, image dir, output dir, flip type); restore them.
a__ = """"""
a__ = """"""
a__ = """"""
a__ = 1 # (0 is vertical, 1 is horizontal)
def _UpperCAmelCase ( ):
    """Entry point: flip every dataset image (plus its YOLO boxes) and save copies.

    NOTE(review): heavily mangled — the tuple results are bound to
    ``snake_case__`` and lost, and ``a``, ``OUTPUT_DIR``, ``paths``,
    ``new_annos``, ``file_name``, ``letter_code``, ``file_root``, ``image``
    and ``annos_list`` are undefined at their use sites. Reconstruct from the
    upstream flip-augmentation script before running.
    """
    snake_case__ , snake_case__ = get_dataset(a , a )
    print("""Processing...""" )
    snake_case__ , snake_case__ , snake_case__ = update_image_and_anno(a , a , a )
    for index, image in enumerate(a ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        snake_case__ = random_chars(32 )
        snake_case__ = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0]
        snake_case__ = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
        cva.imwrite(F'''/{file_root}.jpg''' , a , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(F'''Success {index+1}/{len(a )} with {file_name}''' )
        snake_case__ = []
        for anno in new_annos[index]:
            snake_case__ = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
            annos_list.append(a )
        with open(F'''/{file_root}.txt''' , """w""" ) as outfile:
            outfile.write("""\n""".join(line for line in annos_list ) )
def _UpperCAmelCase ( a : str , a : str ):
snake_case__ = []
snake_case__ = []
for label_file in glob.glob(os.path.join(a , """*.txt""" ) ):
snake_case__ = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(a ) as in_file:
snake_case__ = in_file.readlines()
snake_case__ = os.path.join(a , F'''{label_name}.jpg''' )
snake_case__ = []
for obj_list in obj_lists:
snake_case__ = obj_list.rstrip("""\n""" ).split(""" """ )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(a )
labels.append(a )
return img_paths, labels
def _UpperCAmelCase ( img_list : list , anno_list : list , flip_type : int = 1 ):
    """Flip each image (via OpenCV) and mirror its YOLO boxes accordingly.

    Fixes: the original declared all three parameters as ``a`` (a SyntaxError)
    and collapsed every local onto ``snake_case__``, losing the flipped image
    and the new box coordinates.

    Args:
        img_list: list of image file paths.
        anno_list: parallel list of box lists ``[cls, x_c, y_c, w, h]``.
        flip_type: 1 = horizontal flip (mirrors x), 0 = vertical flip (mirrors y),
            matching cv2.flip's flipCode semantics.

    Returns:
        (new_images, new_annotation_lists, path_list).
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                # horizontal flip mirrors the x-center
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                # vertical flip mirrors the y-center
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def _UpperCAmelCase ( a : int = 32 ):
assert number_char > 1, "The number of character should greater than 1"
snake_case__ = ascii_lowercase + digits
return "".join(random.choice(a ) for _ in range(a ) )
if __name__ == "__main__":
    # NOTE(review): ``main`` is undefined in this file — the entry point above
    # was renamed to ``_UpperCAmelCase`` by mangling; re-point this call.
    main()
    print("""DONE ✅""")
| 654 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy import structure for the CTRL model family: the heavy torch/TF modules
# are only imported when first accessed (or eagerly during type checking).
_lowerCamelCase = {
    """configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
    """tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fix: register the torch objects under their module key instead of
    # rebinding the whole dict (which dropped the config/tokenizer entries).
    _lowerCamelCase["""modeling_ctrl"""] = [
        """CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """CTRLForSequenceClassification""",
        """CTRLLMHeadModel""",
        """CTRLModel""",
        """CTRLPreTrainedModel""",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Same fix for the TensorFlow objects.
    _lowerCamelCase["""modeling_tf_ctrl"""] = [
        """TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFCTRLForSequenceClassification""",
        """TFCTRLLMHeadModel""",
        """TFCTRLModel""",
        """TFCTRLPreTrainedModel""",
    ]
if TYPE_CHECKING:
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )
else:
    import sys
    # Fix: install the lazy proxy into sys.modules (as upstream does) and pass
    # the structure dict defined above, not the undefined ``_import_structure``.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _lowerCamelCase, module_spec=__spec__)
| 71 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
# Number of synthetic examples generated for the speed test.
a__ = 5_0_0_0_0_0
# NOTE(review): the path pieces are all bound to the mangled name ``a__``
# (each rebinding the previous), while the join reads RESULTS_BASEPATH /
# RESULTS_FILENAME which are undefined here — upstream uses distinct names.
a__ , a__ = os.path.split(__file__)
a__ = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def _UpperCAmelCase ( dataset : datasets.Dataset , **kwargs ):
    """Run ``Dataset.map`` with the given kwargs; wall time is measured by @get_duration.

    Fix: the original declared both the dataset and the kwargs as ``a``
    (duplicate argument — a SyntaxError).
    """
    _ = dataset.map(**kwargs)
@get_duration
def _UpperCAmelCase ( dataset : datasets.Dataset , **kwargs ):
    """Run ``Dataset.filter`` with the given kwargs; wall time is measured by @get_duration.

    Fix: the original declared both the dataset and the kwargs as ``a``
    (duplicate argument — a SyntaxError).
    """
    _ = dataset.filter(**kwargs)
def _UpperCAmelCase ( ):
    """Benchmark Dataset.map / Dataset.filter across output formats; dump timings as JSON.

    NOTE(review): heavily mangled — every local is rebound to ``snake_case__``,
    so the later reads of ``tokenizer`` / ``examples`` / ``dataset`` / ``func``
    reference undefined names, and the bare ``map(a)`` / ``filter(a)`` calls hit
    the builtins (upstream calls the two decorated benchmark helpers, which were
    also renamed to ``_UpperCAmelCase``). Reconstruct from the upstream
    datasets benchmark before running.
    """
    snake_case__ = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        snake_case__ = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
        snake_case__ = generate_example_dataset(
            os.path.join(a , """dataset.arrow""" ) , a , num_examples=a )
        snake_case__ = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=a )
        def tokenize(a : Union[str, Any] ):
            return tokenizer(examples["""text"""] )
        snake_case__ = map(a )
        snake_case__ = map(a , batched=a )
        snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""numpy""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""pandas""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        snake_case__ = map(a , function=a , batched=a )
        snake_case__ = filter(a )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(a , """wb""" ) as f:
            f.write(json.dumps(a ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
    # NOTE(review): ``benchmark_map_filter`` is undefined — the benchmark entry
    # point above was renamed to ``_UpperCAmelCase`` by mangling; re-point this call.
    benchmark_map_filter()
| 654 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : str = logging.get_logger(__name__)
# Canonical XLM-RoBERTa checkpoints mapped to their hosted config files.
# NOTE(review): both module globals are bound to the same mangled name
# ``_UpperCAmelCase`` — the URL map silently overwrites the logger; upstream
# uses two distinct names.
_UpperCAmelCase : str = {
    '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
    '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
    '''xlm-roberta-large-finetuned-conll02-dutch''': (
        '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
    ),
    '''xlm-roberta-large-finetuned-conll02-spanish''': (
        '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
    ),
    '''xlm-roberta-large-finetuned-conll03-english''': (
        '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
    ),
    '''xlm-roberta-large-finetuned-conll03-german''': (
        '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
    ),
}
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
    """Configuration class for XLM-RoBERTa models.

    Fixes: the original ``__init__`` declared every parameter with the same
    mangled name ``snake_case_`` (duplicate argument — a SyntaxError) and
    bound every value to a throwaway local instead of an instance attribute.
    Parameter/attribute names and defaults are restored from the upstream
    XLM-RoBERTa configuration.
    """
    # model_type identifier (attribute name preserved from the mangled source)
    UpperCamelCase__ = 'xlm-roberta'

    def __init__(
        self,
        vocab_size=3_05_22,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
    """ONNX export configuration for XLM-RoBERTa.

    Fix: the axis mapping was bound to the mangled local ``lowercase`` while
    the return expression read the undefined name ``dynamic_axis`` (NameError).
    """
    @property
    def _A( self ):
        # Multiple-choice inputs carry an extra ``choice`` axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 72 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
# Emit INFO-level progress while converting; grab the module-level logger.
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def _UpperCAmelCase ( a : List[str] , a : Any=False ):
snake_case__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
snake_case__ = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def _UpperCAmelCase ( a : int , a : List[Any] , a : Union[str, Any]=False ):
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ = """"""
else:
snake_case__ = """deit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
snake_case__ = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case__ = in_proj_weight[
: config.hidden_size, :
]
snake_case__ = in_proj_bias[: config.hidden_size]
snake_case__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ = in_proj_bias[-config.hidden_size :]
def _UpperCAmelCase ( a : Dict , a : Union[str, Any] , a : int ):
snake_case__ = dct.pop(a )
snake_case__ = val
def _UpperCAmelCase ( ):
    """Download the standard COCO cats image used to sanity-check conversions.

    Fix: the original passed the undefined name ``a`` for both the URL and the
    ``stream`` flag; ``stream=True`` is required so PIL can read ``.raw``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def _UpperCAmelCase ( deit_name , pytorch_dump_folder_path ):
    """Convert a timm DeiT checkpoint to the HuggingFace format and save it.

    Reconstructed from the upstream DeiT conversion script: the original was
    mangled — both parameters were named ``a`` (duplicate argument, a
    SyntaxError) and every local was rebound to ``snake_case__``, so config,
    state dict and model were lost between statements.

    Args:
        deit_name: timm model name, e.g. "vit_deit_base_distilled_patch16_224";
            patch size and image size are parsed from its suffix.
        pytorch_dump_folder_path: output directory for the converted model.
    """
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size)  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # NOTE(review): the parser is bound to the mangled name ``a__`` but the
    # following lines read ``parser`` / ``args``, and ``convert_deit_checkpoint``
    # is undefined here (the conversion function above was renamed to
    # ``_UpperCAmelCase``). Restore the upstream names before running.
    a__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--deit_name""",
        default="""vit_deit_base_distilled_patch16_224""",
        type=str,
        help="""Name of the DeiT timm model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    a__ = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 654 | 0 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
a_ : List[str] = logging.get_logger(__name__)
# Canonical GPT-J checkpoints mapped to their hosted config files.
# NOTE(review): both module globals are bound to the same mangled name ``a_``
# — the config map silently overwrites the logger; upstream uses two names.
a_ : Dict = {
    'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class _snake_case ( A__ ):
    """Configuration class for GPT-J models.

    Fixes: the two class attributes were both named ``_lowercase`` (the second
    silently overwrote the first) — restored to the ``model_type`` /
    ``attribute_map`` names the PretrainedConfig machinery requires; the
    ``__init__`` declared every parameter as ``a`` (duplicate argument — a
    SyntaxError) and bound each value to a local instead of ``self``. Names
    and defaults are restored from the upstream GPT-J configuration.
    """
    model_type = '''gptj'''
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(
        self,
        vocab_size=5_0400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=5_0256,
        eos_token_id=5_0256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class _snake_case ( A__ ):
    """ONNX export configuration for GPT-J (with optional past key values).

    Fixes: every method was named ``SCREAMING_SNAKE_CASE__`` (later defs
    silently shadowed earlier ones) while the bodies call ``self.num_layers``
    / ``self.num_attention_heads``; ``__init__`` and the dummy-input builder
    declared duplicate ``a`` parameters (SyntaxErrors); and locals were
    collapsed onto one mangled name. Names restored from the upstream GPT-J
    ONNX config.
    """

    def __init__(self, config, task="default", patching_specs=None, use_past=False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, 'pad_token_id', None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self):
        # input_ids always present; attention_mask axes depend on past usage
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def num_layers(self):
        return self._config.n_layer

    @property
    def num_attention_heads(self):
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs

    @property
    def default_onnx_opset(self):
        return 13
| 73 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( lowercase_ ):
    """Output container for the prior transformer's forward pass.

    NOTE(review): the field name was mangled to ``_lowercase``; upstream names
    this field for the predicted image embedding — confirm before relying on
    it by name.
    """
    # predicted embedding tensor returned by the model's forward pass
    _lowercase : torch.FloatTensor
class _lowerCAmelCase ( lowercase_ , lowercase_ ):
"""simple docstring"""
    @register_to_config
    def __init__( self : Tuple , UpperCamelCase__ : int = 3_2 , UpperCamelCase__ : int = 6_4 , UpperCamelCase__ : int = 2_0 , UpperCamelCase__ : int = 7_6_8 , UpperCamelCase__ : Optional[Any]=7_7 , UpperCamelCase__ : str=4 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : str = "silu" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = "linear" , UpperCamelCase__ : Optional[str] = "prd" , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , ):
        '''Build the prior transformer: time embedding, input/output projections,
        learned positional + prd embeddings, transformer blocks, and the causal
        attention mask buffer.

        NOTE(review): this block is machine-mangled — every parameter shares the
        name ``UpperCamelCase__`` (duplicate argument — a SyntaxError) and every
        submodule is bound to the local ``snake_case__`` instead of a ``self``
        attribute, while the branch conditions read the upstream parameter names
        (``embedding_proj_norm_type``, ``encoder_hid_proj_type``, ``added_emb_type``,
        ``norm_in_type``) which are undefined here. Reconstruct from the upstream
        diffusers PriorTransformer before use.
        '''
        super().__init__()
        snake_case__ = num_attention_heads
        snake_case__ = attention_head_dim
        snake_case__ = num_attention_heads * attention_head_dim
        snake_case__ = additional_embeddings
        snake_case__ = time_embed_dim or inner_dim
        snake_case__ = embedding_proj_dim or embedding_dim
        snake_case__ = clip_embed_dim or embedding_dim
        snake_case__ = Timesteps(UpperCamelCase__ , UpperCamelCase__ , 0)
        snake_case__ = TimestepEmbedding(UpperCamelCase__ , UpperCamelCase__ , out_dim=UpperCamelCase__ , act_fn=UpperCamelCase__)
        snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        if embedding_proj_norm_type is None:
            snake_case__ = None
        elif embedding_proj_norm_type == "layer":
            snake_case__ = nn.LayerNorm(UpperCamelCase__)
        else:
            raise ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''')
        snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        if encoder_hid_proj_type is None:
            snake_case__ = None
        elif encoder_hid_proj_type == "linear":
            snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        else:
            raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''')
        snake_case__ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase__))
        if added_emb_type == "prd":
            snake_case__ = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase__))
        elif added_emb_type is None:
            snake_case__ = None
        else:
            raise ValueError(
                F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''')
        snake_case__ = nn.ModuleList(
            [
                BasicTransformerBlock(
                    UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn="""gelu""" , attention_bias=UpperCamelCase__ , )
                for d in range(UpperCamelCase__)
            ])
        if norm_in_type == "layer":
            snake_case__ = nn.LayerNorm(UpperCamelCase__)
        elif norm_in_type is None:
            snake_case__ = None
        else:
            raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''')
        snake_case__ = nn.LayerNorm(UpperCamelCase__)
        snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        # causal mask: upper-triangular large-negative values added to attention scores
        snake_case__ = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0)
        causal_attention_mask.triu_(1)
        snake_case__ = causal_attention_mask[None, ...]
        self.register_buffer("""causal_attention_mask""" , UpperCamelCase__ , persistent=UpperCamelCase__)
        snake_case__ = nn.Parameter(torch.zeros(1 , UpperCamelCase__))
        snake_case__ = nn.Parameter(torch.zeros(1 , UpperCamelCase__))
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __magic_name__ ( self : Optional[int]):
'''simple docstring'''
snake_case__ = {}
def fn_recursive_add_processors(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Dict[str, AttentionProcessor]):
if hasattr(UpperCamelCase__ , """set_processor"""):
snake_case__ = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , UpperCamelCase__ , UpperCamelCase__)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
return processors
def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
'''simple docstring'''
snake_case__ = len(self.attn_processors.keys())
if isinstance(UpperCamelCase__ , UpperCamelCase__) and len(UpperCamelCase__) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(UpperCamelCase__)} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''')
def fn_recursive_attn_processor(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Optional[int]):
if hasattr(UpperCamelCase__ , """set_processor"""):
if not isinstance(UpperCamelCase__ , UpperCamelCase__):
module.set_processor(UpperCamelCase__)
else:
module.set_processor(processor.pop(F'''{name}.processor'''))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , UpperCamelCase__ , UpperCamelCase__)
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
    def __magic_name__ ( self : Dict):
        '''Reset every attention module to the default AttnProcessor implementation.'''
        self.set_attn_processor(AttnProcessor())
def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[torch.Tensor, float, int] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.BoolTensor] = None , UpperCamelCase__ : bool = True , ):
    """Prior-transformer forward pass: embed the timestep, assemble a token
    sequence from [optional encoder states, projected embeddings, time
    embedding, hidden states, optional prd token], run the transformer blocks,
    and project the result to a CLIP image embedding.

    NOTE(review): locals are mangled throughout (`snake_case__` assignments vs.
    reads of `timesteps` / `hidden_states` / ...); the comments describe the
    apparent intent -- confirm against diffusers' PriorTransformer.forward.

    Returns a `PriorTransformerOutput`, or a 1-tuple when `return_dict` is False.
    """
    snake_case__ = hidden_states.shape[0]
    snake_case__ = timestep
    # Normalize `timestep` (int/float/0-d tensor) to a 1-D tensor on the input device.
    if not torch.is_tensor(UpperCamelCase__):
        snake_case__ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device)
    elif torch.is_tensor(UpperCamelCase__) and len(timesteps.shape) == 0:
        snake_case__ = timesteps[None].to(hidden_states.device)
    # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
    snake_case__ = timesteps * torch.ones(UpperCamelCase__ , dtype=timesteps.dtype , device=timesteps.device)
    snake_case__ = self.time_proj(UpperCamelCase__)
    # timesteps does not contain any weights and will always return f32 tensors
    # but time_embedding might be fp16, so we need to cast here.
    snake_case__ = timesteps_projected.to(dtype=self.dtype)
    snake_case__ = self.time_embedding(UpperCamelCase__)
    if self.embedding_proj_norm is not None:
        snake_case__ = self.embedding_proj_norm(UpperCamelCase__)
    snake_case__ = self.embedding_proj(UpperCamelCase__)
    if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
        snake_case__ = self.encoder_hidden_states_proj(UpperCamelCase__)
    elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
        raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""")
    snake_case__ = self.proj_in(UpperCamelCase__)
    snake_case__ = self.positional_embedding.to(hidden_states.dtype)
    # Assemble the token sequence; 2-D inputs get a singleton sequence dimension.
    snake_case__ = []
    snake_case__ = 0
    if encoder_hidden_states is not None:
        additional_embeds.append(UpperCamelCase__)
        additional_embeddings_len += encoder_hidden_states.shape[1]
    if len(proj_embeddings.shape) == 2:
        snake_case__ = proj_embeddings[:, None, :]
    if len(hidden_states.shape) == 2:
        snake_case__ = hidden_states[:, None, :]
    snake_case__ = additional_embeds + [
        proj_embeddings,
        time_embeddings[:, None, :],
        hidden_states,
    ]
    if self.prd_embedding is not None:
        # Learned "prd" token appended last; it will carry the prediction.
        snake_case__ = self.prd_embedding.to(hidden_states.dtype).expand(UpperCamelCase__ , -1 , -1)
        additional_embeds.append(UpperCamelCase__)
    snake_case__ = torch.cat(
        UpperCamelCase__ , dim=1 , )
    # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
    snake_case__ = additional_embeddings_len + proj_embeddings.shape[1] + 1
    if positional_embeddings.shape[1] < hidden_states.shape[1]:
        snake_case__ = F.pad(
            UpperCamelCase__ , (
                0,
                0,
                additional_embeddings_len,
                self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
            ) , value=0.0 , )
    snake_case__ = hidden_states + positional_embeddings
    if attention_mask is not None:
        # Convert the boolean mask to an additive mask, extend it to cover the extra
        # tokens, combine with the causal mask, and repeat per attention head.
        snake_case__ = (1 - attention_mask.to(hidden_states.dtype)) * -1_00_00.0
        snake_case__ = F.pad(UpperCamelCase__ , (0, self.additional_embeddings) , value=0.0)
        snake_case__ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
        snake_case__ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0)
    if self.norm_in is not None:
        snake_case__ = self.norm_in(UpperCamelCase__)
    for block in self.transformer_blocks:
        snake_case__ = block(UpperCamelCase__ , attention_mask=UpperCamelCase__)
    snake_case__ = self.norm_out(UpperCamelCase__)
    # With a prd token, the last position holds the prediction; otherwise take
    # everything after the additional embeddings.
    if self.prd_embedding is not None:
        snake_case__ = hidden_states[:, -1]
    else:
        snake_case__ = hidden_states[:, additional_embeddings_len:]
    snake_case__ = self.proj_to_clip_embeddings(UpperCamelCase__)
    if not return_dict:
        return (predicted_image_embedding,)
    return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase__)
def __magic_name__ ( self : Any , UpperCamelCase__ : Any):
'''simple docstring'''
snake_case__ = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 654 | 0 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def a__ ( image_size , device ):
    """Download the BLIP demo image and return it as a normalized (1, 3, H, W) tensor.

    Args:
        image_size: target square size for the resize transform.
        device: torch device the resulting tensor is moved to.

    Fixes to the original:
    - the signature declared the same parameter name twice (a SyntaxError) even
      though the body and the caller use `image_size` / `device`;
    - `requests.get(..., stream=device)` passed the device as the `stream` flag --
      streaming the download is what was intended, so `stream=True`.
    """
    url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
    raw_image = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
            transforms.ToTensor(),
            transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ),
        ] )
    image = transform(raw_image ).unsqueeze(0 ).to(device )
    return image
def a__ ( key ):
    """Map a salesforce-BLIP state-dict key to its transformers equivalent.

    The substitutions are applied sequentially, so several may fire on one key
    (e.g. visual_encoder -> vision_model.encoder, blocks -> layers,
    attn -> self_attn). Keys with no match are returned unchanged.

    Fix: the original signature named its parameter `snake_case` while the body
    read the undefined name `key` (NameError); the parameter is now `key`.
    """
    if "visual_encoder" in key:
        key = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , key )
    if "blocks" in key:
        key = re.sub(R'''blocks''' , '''layers''' , key )
    if "attn" in key:
        key = re.sub(R'''attn''' , '''self_attn''' , key )
    if "norm1" in key:
        key = re.sub(R'''norm1''' , '''layer_norm1''' , key )
    if "norm2" in key:
        key = re.sub(R'''norm2''' , '''layer_norm2''' , key )
    if "encoder.norm" in key:
        key = re.sub(R'''encoder.norm''' , '''post_layernorm''' , key )
    if "encoder.patch_embed.proj" in key:
        key = re.sub(R'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , key )
    if "encoder.pos_embed" in key:
        key = re.sub(R'''encoder.pos_embed''' , '''embeddings.position_embedding''' , key )
    if "encoder.cls_token" in key:
        key = re.sub(R'''encoder.cls_token''' , '''embeddings.class_embedding''' , key )
    if "self_attn" in key:
        key = re.sub(R'''self_attn.proj''' , '''self_attn.projection''' , key )
    return key
@torch.no_grad()
def a__ ( pytorch_dump_folder_path , config_path=None ):
    """Convert the official BLIP captioning / VQA / ITM checkpoints to transformers.

    For each of the three heads this downloads the original salesforce weights,
    renames every state-dict key with the key-renaming helper, loads the result
    into the matching transformers model, and sanity-checks the outputs on the
    demo image. Converted models are saved under `pytorch_dump_folder_path`
    (plus "_vqa" / "_itm" suffixes) when a path is given.

    Fixes to the original: the signature declared the same parameter name twice
    (a SyntaxError), and the body read many names (`config`, `hf_model`,
    `modified_state_dict`, ...) that were never bound because every assignment
    targeted a throwaway name. The locals below restore the intended data flow.
    """
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path )
    else:
        # projection_dim=512 matches the released BLIP base checkpoints.
        config = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
    hf_model = BlipForConditionalGeneration(config ).eval()
    model_url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
    pt_model = blip_decoder(pretrained=model_url , image_size=384 , vit='''base''' )
    pt_model = pt_model.eval()
    # Rename every key of the original state dict to its transformers equivalent.
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict )
    image_size = 384
    image = load_demo_image(image_size=image_size , device='''cpu''' )
    tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''' )
    input_ids = tokenizer(['''a picture of'''] ).input_ids
    # Sanity-check generation with and without the prompt against recorded ids.
    out = hf_model.generate(image , input_ids )
    assert out[0].tolist() == [30_522, 1_037, 3_861, 1_997, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]
    out = hf_model.generate(image )
    assert out[0].tolist() == [30_522, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]
    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path )
    # --- VQA head ---
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
    )
    vqa_model = blip_vqa(pretrained=model_url , image_size=image_size , vit='''base''' )
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_vqa_model = BlipForQuestionAnswering(config )
    hf_vqa_model.load_state_dict(modified_state_dict )
    question = ['''How many dogs are in this image?''']
    question_input_ids = tokenizer(question , return_tensors='''pt''' ).input_ids
    answer = hf_vqa_model.generate(question_input_ids , image )
    print(tokenizer.decode(answer[0] ) )
    assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
    # --- ITM (image-text matching) head ---
    model_url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
    itm_model = blip_itm(pretrained=model_url , image_size=image_size , vit='''base''' )
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_itm_model = BlipForImageTextRetrieval(config )
    question = ['''A picture of a woman with a dog sitting in a beach''']
    question_input_ids = tokenizer(
        question , return_tensors='''pt''' , padding='''max_length''' , truncation=True , max_length=35 , ).input_ids
    hf_itm_model.load_state_dict(modified_state_dict )
    hf_itm_model.eval()
    out_itm = hf_itm_model(question_input_ids , image , use_itm_head=True )
    out = hf_itm_model(question_input_ids , image , use_itm_head=False )
    # Compare the matching scores against recorded reference values.
    assert out[0].item() == 0.2110_6874_9427_7954
    assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
    # Fixes: the parser was assigned to a throwaway name so `parser.add_argument`
    # raised NameError, and the call passed a nonexistent `args.checkpoint_path`
    # as an extra positional argument. The converter takes
    # (pytorch_dump_folder_path, config_path).
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 74 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
# NOTE(review): both constants are assigned to the same name `a__` (the second
# overwrites the first), yet later code reads `TOKENIZER_CHECKPOINTS`. These look
# like mangled TOKENIZER_CHECKPOINTS / TINY_MODEL_CHECKPOINT -- confirm against
# the upstream transformers test module.
a__ = ["""gpt2"""]
a__ = """gpt2"""
if is_tf_available():

    class _lowerCAmelCase ( tf.Module ):
        """tf.Module pairing a TF tokenizer with a TF-GPT2 LM head so the two can be
        exported together via `tf.saved_model.save` with a string-input signature.

        NOTE(review): locals appear mangled -- assignments target `snake_case__`
        while the code reads `tokenized` / `input_ids_dense` / `self.tokenizer`;
        presumably these were `self.tokenizer = tokenizer` etc. originally.
        """

        def __init__( self : List[Any] , UpperCamelCase__ : int):
            """Store the tokenizer and build an untrained GPT-2 LM from its config."""
            super().__init__()
            snake_case__ = tokenizer
            snake_case__ = AutoConfig.from_pretrained(UpperCamelCase__)
            snake_case__ = TFGPTaLMHeadModel.from_config(UpperCamelCase__)

        @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text"""),))
        def __magic_name__ ( self : Tuple , UpperCamelCase__ : int):
            """Tokenize a batch of strings and run the LM, returning the logits."""
            snake_case__ = self.tokenizer(UpperCamelCase__)
            # Densify the ragged ids; positions with id > 0 count as real tokens.
            snake_case__ = tokenized["""input_ids"""].to_tensor()
            snake_case__ = tf.cast(input_ids_dense > 0 , tf.intaa)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            snake_case__ = self.model(input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__)["""logits"""]
            return outputs
@require_tf
@require_keras_nlp
class _lowerCAmelCase ( unittest.TestCase ):
    """Checks that the in-graph TF GPT-2 tokenizer agrees with the Python tokenizer
    and survives tf.function tracing, SavedModel export, config round-trips, and
    `max_length` truncation.

    NOTE(review): locals are mangled (`snake_case__` assignments vs. reads of
    `python_outputs` / `compiled_tokenizer` / ...), and `TOKENIZER_CHECKPOINTS`
    is read although the module constants were renamed `a__` -- confirm against
    the upstream transformers test before relying on details.
    """

    def __magic_name__ ( self : List[Any]):
        """Build paired (Python, TF) tokenizers plus tricky unicode test sentences."""
        super().setUp()
        snake_case__ = [GPTaTokenizer.from_pretrained(UpperCamelCase__) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        snake_case__ = [TFGPTaTokenizer.from_pretrained(UpperCamelCase__) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        snake_case__ = [
            """This is a straightforward English test sentence.""",
            """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
            """Now we're going to add some Chinese: 一 二 三 一二三""",
            """And some much more rare Chinese: 齉 堃 齉堃""",
            """Je vais aussi écrire en français pour tester les accents""",
            """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
        ]
        snake_case__ = list(zip(self.test_sentences , self.test_sentences[::-1]))

    def __magic_name__ ( self : Optional[int]):
        """TF tokenizer output must match the Python tokenizer on every sentence."""
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                snake_case__ = tokenizer([test_inputs] , return_tensors="""tf""")
                snake_case__ = tf_tokenizer([test_inputs])
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    snake_case__ = python_outputs[key].numpy()
                    snake_case__ = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(UpperCamelCase__ , tf.intaa) == tf_outputs_values))

    @slow
    def __magic_name__ ( self : Optional[int]):
        """The tokenizer must behave identically when wrapped in tf.function."""
        for tf_tokenizer in self.tf_tokenizers:
            snake_case__ = tf.function(UpperCamelCase__)
            for test_inputs in self.test_sentences:
                snake_case__ = tf.constant(UpperCamelCase__)
                snake_case__ = compiled_tokenizer(UpperCamelCase__)
                snake_case__ = tf_tokenizer(UpperCamelCase__)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def __magic_name__ ( self : Optional[Any]):
        """Tokenizer + model must survive a SavedModel export/import round trip."""
        for tf_tokenizer in self.tf_tokenizers:
            snake_case__ = ModelToSave(tokenizer=UpperCamelCase__)
            snake_case__ = tf.convert_to_tensor([self.test_sentences[0]])
            snake_case__ = model.serving(UpperCamelCase__)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                snake_case__ = Path(UpperCamelCase__) / """saved.model"""
                tf.saved_model.save(UpperCamelCase__ , UpperCamelCase__ , signatures={"""serving_default""": model.serving})
                snake_case__ = tf.saved_model.load(UpperCamelCase__)
                snake_case__ = loaded_model.signatures["""serving_default"""](UpperCamelCase__)["""output_0"""]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def __magic_name__ ( self : Tuple):
        """A tokenizer rebuilt via get_config/from_config must match the original."""
        for tf_tokenizer in self.tf_tokenizers:
            snake_case__ = tf.convert_to_tensor([self.test_sentences[0]])
            snake_case__ = tf_tokenizer(UpperCamelCase__)  # Build model with some sample inputs
            snake_case__ = tf_tokenizer.get_config()
            snake_case__ = TFGPTaTokenizer.from_config(UpperCamelCase__)
            snake_case__ = model_from_config(UpperCamelCase__)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def __magic_name__ ( self : Dict):
        """`max_length` must cap the produced input_ids length."""
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            snake_case__ = 1_2_3_1_2_3
            for max_length in [3, 5, 1_0_2_4]:
                snake_case__ = tf.convert_to_tensor([self.test_sentences[0]])
                snake_case__ = tf_tokenizer(UpperCamelCase__ , max_length=UpperCamelCase__)
                snake_case__ = out["""input_ids"""].numpy().shape[1]
                assert out_length == max_length
| 654 | 0 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase_ :
    """Harness that builds tiny ConvNext-backboned UperNet configs and inputs for
    the common model tests.

    NOTE(review): obfuscation damage -- `__init__` declares the parameter `_A`
    repeatedly (a SyntaxError) while the body reads the intended names
    (`parent`, `batch_size`, ...), and every method below is named `lowercase_`
    so later defs shadow earlier ones (`get_config` / `get_backbone_config`,
    which other code reads, no longer exist under those names). Docstrings
    describe the apparent upstream intent (UperNetModelTester).
    """

    def __init__( self : List[str] , _A : Optional[Any] , _A : str=13 , _A : List[Any]=32 , _A : Tuple=3 , _A : Tuple=4 , _A : List[str]=[10, 20, 30, 40] , _A : Tuple=[2, 2, 3, 2] , _A : List[str]=True , _A : List[str]=True , _A : Optional[int]=37 , _A : Union[str, Any]="gelu" , _A : int=10 , _A : List[str]=0.0_2 , _A : Tuple=["stage2", "stage3", "stage4"] , _A : Dict=3 , _A : Dict=None , ):
        """Record the hyper-parameters used to build configs and dummy inputs."""
        UpperCAmelCase__ : Tuple = parent
        UpperCAmelCase__ : Dict = batch_size
        UpperCAmelCase__ : int = image_size
        UpperCAmelCase__ : int = num_channels
        UpperCAmelCase__ : int = num_stages
        UpperCAmelCase__ : List[Any] = hidden_sizes
        UpperCAmelCase__ : Dict = depths
        UpperCAmelCase__ : Optional[int] = is_training
        UpperCAmelCase__ : Tuple = use_labels
        UpperCAmelCase__ : Optional[Any] = intermediate_size
        UpperCAmelCase__ : Optional[int] = hidden_act
        UpperCAmelCase__ : Optional[int] = type_sequence_label_size
        UpperCAmelCase__ : Optional[int] = initializer_range
        UpperCAmelCase__ : Any = out_features
        UpperCAmelCase__ : Optional[Any] = num_labels
        UpperCAmelCase__ : Any = scope
        UpperCAmelCase__ : List[str] = num_stages

    def lowercase_ ( self : Dict ):
        """Build a (config, pixel_values, labels) triple of random dummy inputs."""
        UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase__ : Any = None
        if self.use_labels:
            UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        UpperCAmelCase__ : List[Any] = self.get_config()
        return config, pixel_values, labels

    def lowercase_ ( self : int ):
        """Build the tiny ConvNext backbone config (upstream: get_backbone_config)."""
        return ConvNextConfig(
            num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )

    def lowercase_ ( self : str ):
        """Build the UperNet config wrapping the backbone config (upstream: get_config)."""
        return UperNetConfig(
            backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_A , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_A , loss_ignore_index=255 , num_labels=self.num_labels , )

    def lowercase_ ( self : Any , _A : int , _A : List[str] , _A : int ):
        """Run the segmentation model on dummy inputs and check the logits shape."""
        UpperCAmelCase__ : Tuple = UperNetForSemanticSegmentation(config=_A )
        model.to(_A )
        model.eval()
        UpperCAmelCase__ : Union[str, Any] = model(_A )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )

    def lowercase_ ( self : str ):
        """Repackage prepare_config_and_inputs output as (config, inputs_dict)."""
        UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
        (
            (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) ,
        ) : Optional[int] = config_and_inputs
        UpperCAmelCase__ : List[Any] = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
    """Common (architecture-agnostic) tests for UperNetForSemanticSegmentation.

    NOTE(review): obfuscation damage -- the base classes are the undefined name
    `__a` (presumably ModelTesterMixin / PipelineTesterMixin), `UperNetModelTester`
    and `_A` are read but not defined here, and several methods named `lowercase_`
    shadow one another. Confirm against the upstream transformers test module.
    """

    # Model/pipeline classes under test and feature flags for the common suite.
    lowerCAmelCase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    lowerCAmelCase__ = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False

    def lowercase_ ( self : List[Any] ):
        """Set up the model tester and the config tester."""
        UpperCAmelCase__ : int = UperNetModelTester(self )
        UpperCAmelCase__ : Dict = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )

    def lowercase_ ( self : Dict ):
        """Run the standard config serialization/initialization checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def lowercase_ ( self : Dict ):
        """No common config properties to check for UperNet."""
        return

    def lowercase_ ( self : Optional[int] ):
        """The forward signature must start with `pixel_values`."""
        UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase__ : Union[str, Any] = model_class(_A )
            UpperCAmelCase__ : Tuple = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase__ : Any = [*signature.parameters.keys()]
            UpperCAmelCase__ : str = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _A )

    def lowercase_ ( self : List[Any] ):
        """Exercise the semantic-segmentation forward pass via the model tester."""
        UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*_A )

    @unittest.skip(reason='''UperNet does not use inputs_embeds''' )
    def lowercase_ ( self : int ):
        """Skipped: no inputs_embeds pathway."""
        pass

    @unittest.skip(reason='''UperNet does not support input and output embeddings''' )
    def lowercase_ ( self : List[str] ):
        """Skipped: no I/O embeddings."""
        pass

    @unittest.skip(reason='''UperNet does not have a base model''' )
    def lowercase_ ( self : Tuple ):
        """Skipped: no standalone base model."""
        pass

    @unittest.skip(reason='''UperNet does not have a base model''' )
    def lowercase_ ( self : Dict ):
        """Skipped: no standalone base model."""
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def lowercase_ ( self : Optional[int] ):
        """Skipped: incompatible with nn.DataParallel."""
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def lowercase_ ( self : Union[str, Any] ):
        """Skipped pending a smaller common-test model."""
        pass

    def lowercase_ ( self : Tuple ):
        """Hidden states must have one entry per stage (+1) with the expected spatial size."""
        def check_hidden_states_output(_A : Any , _A : List[Any] , _A : Dict ):
            UpperCAmelCase__ : str = model_class(_A )
            model.to(_A )
            model.eval()
            with torch.no_grad():
                UpperCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(_A , _A ) )
            UpperCAmelCase__ : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            UpperCAmelCase__ : List[str] = self.model_tester.num_stages
            self.assertEqual(len(_A ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase__ : Union[str, Any] = True
            check_hidden_states_output(_A , _A , _A )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            UpperCAmelCase__ : Optional[Any] = True
            check_hidden_states_output(_A , _A , _A )

    def lowercase_ ( self : Optional[Any] ):
        """With zeroed init ranges, every trainable parameter must init to 0 or 1."""
        UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase__ : Optional[Any] = _config_zero_init(_A )
        UpperCAmelCase__ : Dict = _config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            UpperCAmelCase__ : Optional[Any] = model_class(config=_A )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )

    @unittest.skip(reason='''UperNet does not have tied weights''' )
    def lowercase_ ( self : List[Any] ):
        """Skipped: no tied weights."""
        pass

    @slow
    def lowercase_ ( self : Optional[int] ):
        """Pretrained checkpoints must load successfully."""
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase__ : Any = UperNetForSemanticSegmentation.from_pretrained(_A )
            self.assertIsNotNone(_A )
def a__ ( ):
    """Download the ADE20k fixture image from the Hub and return it as an RGB PIL image.

    Fix: the original downloaded the file into one variable but then opened the
    undefined name `lowerCAmelCase__` (NameError); it now opens the downloaded
    file path. (The stale `-> Tuple` annotation, whose name was not imported,
    is dropped.)
    """
    filepath = hf_hub_download(
        repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
    image = Image.open(filepath ).convert('''RGB''' )
    return image
@require_torch
@require_vision
@slow
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow integration tests: run pretrained Swin / ConvNext UperNet checkpoints
    on the ADE20k fixture image and compare logits to recorded reference values.

    NOTE(review): both methods are named `lowercase_`, so the second shadows the
    first inside this class body, and `prepare_img` is read although the helper
    above was renamed `a__` -- obfuscation damage; confirm against upstream.
    """

    def lowercase_ ( self : Tuple ):
        """openmmlab/upernet-swin-tiny forward pass must match recorded logits."""
        UpperCAmelCase__ : Dict = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
        UpperCAmelCase__ : Any = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(_A )
        UpperCAmelCase__ : Union[str, Any] = prepare_img()
        UpperCAmelCase__ : Union[str, Any] = processor(images=_A , return_tensors='''pt''' ).to(_A )
        with torch.no_grad():
            UpperCAmelCase__ : List[Any] = model(**_A )
        UpperCAmelCase__ : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , _A )
        UpperCAmelCase__ : Any = torch.tensor(
            [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(_A )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1e-4 ) )

    def lowercase_ ( self : List[Any] ):
        """openmmlab/upernet-convnext-tiny forward pass must match recorded logits."""
        UpperCAmelCase__ : int = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
        UpperCAmelCase__ : Any = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(_A )
        UpperCAmelCase__ : str = prepare_img()
        UpperCAmelCase__ : Dict = processor(images=_A , return_tensors='''pt''' ).to(_A )
        with torch.no_grad():
            UpperCAmelCase__ : List[str] = model(**_A )
        UpperCAmelCase__ : Tuple = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , _A )
        UpperCAmelCase__ : List[Any] = torch.tensor(
            [[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(_A )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1e-4 ) )
| 75 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : int = (IPNDMScheduler,)
_lowercase : int = (('''num_inference_steps''', 50),)
def __magic_name__ ( self : Any , **UpperCamelCase__ : Tuple):
'''simple docstring'''
snake_case__ = {"""num_train_timesteps""": 1_0_0_0}
config.update(**UpperCamelCase__)
return config
def __magic_name__ ( self : int , UpperCamelCase__ : Dict=0 , **UpperCamelCase__ : int):
    """Config round-trip check: save a configured scheduler, reload it, and verify
    both instances step a dummy sample to (near-)identical outputs.

    NOTE(review): locals are mangled (`snake_case__` assignments vs. reads of
    `kwargs` / `sample` / `scheduler` / ...); intent inferred from the diffusers
    scheduler test suite -- confirm before relying on details.
    """
    snake_case__ = dict(self.forward_default_kwargs)
    snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
    snake_case__ = self.dummy_sample
    snake_case__ = 0.1 * sample
    # IPNDM keeps a history of past residuals; seed it with dummies.
    snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
    for scheduler_class in self.scheduler_classes:
        snake_case__ = self.get_scheduler_config(**UpperCamelCase__)
        snake_case__ = scheduler_class(**UpperCamelCase__)
        scheduler.set_timesteps(UpperCamelCase__)
        # copy over dummy past residuals
        snake_case__ = dummy_past_residuals[:]
        if time_step is None:
            # default to a timestep in the middle of the schedule
            snake_case__ = scheduler.timesteps[len(scheduler.timesteps) // 2]
        with tempfile.TemporaryDirectory() as tmpdirname:
            scheduler.save_config(UpperCamelCase__)
            snake_case__ = scheduler_class.from_pretrained(UpperCamelCase__)
            new_scheduler.set_timesteps(UpperCamelCase__)
            # copy over dummy past residuals
            snake_case__ = dummy_past_residuals[:]
        # Stepping both schedulers must give the same result (twice, to exercise
        # the multi-step history).
        snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
        snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
        assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
        snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
        snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
        assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __magic_name__ ( self : List[Any]):
'''simple docstring'''
pass
def __magic_name__ ( self : Tuple , UpperCamelCase__ : Union[str, Any]=0 , **UpperCamelCase__ : Tuple):
    """Forward round-trip check: same as the config round-trip above, but with the
    default scheduler config and forward-kwarg overrides.

    NOTE(review): same local-name mangling as the sibling check
    (`snake_case__` assignments vs. reads of `kwargs` / `scheduler` / ...).
    """
    snake_case__ = dict(self.forward_default_kwargs)
    snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
    snake_case__ = self.dummy_sample
    snake_case__ = 0.1 * sample
    snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
    for scheduler_class in self.scheduler_classes:
        snake_case__ = self.get_scheduler_config()
        snake_case__ = scheduler_class(**UpperCamelCase__)
        scheduler.set_timesteps(UpperCamelCase__)
        # copy over dummy past residuals (must be after setting timesteps)
        snake_case__ = dummy_past_residuals[:]
        if time_step is None:
            snake_case__ = scheduler.timesteps[len(scheduler.timesteps) // 2]
        with tempfile.TemporaryDirectory() as tmpdirname:
            scheduler.save_config(UpperCamelCase__)
            snake_case__ = scheduler_class.from_pretrained(UpperCamelCase__)
            # copy over dummy past residuals
            new_scheduler.set_timesteps(UpperCamelCase__)
            # copy over dummy past residual (must be after setting timesteps)
            snake_case__ = dummy_past_residuals[:]
        # Both schedulers must step a dummy sample identically.
        snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
        snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
        assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
        snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
        snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
        assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __magic_name__ ( self : Union[str, Any] , **UpperCamelCase__ : Dict):
    """Run the full denoising loop over a dummy model and return the final sample.

    NOTE(review): the loop over `scheduler.timesteps` runs twice, matching the
    upstream IPNDM test (presumably to exercise the multi-order history) --
    confirm this is deliberate. Locals are mangled as elsewhere in this class.
    """
    snake_case__ = self.scheduler_classes[0]
    snake_case__ = self.get_scheduler_config(**UpperCamelCase__)
    snake_case__ = scheduler_class(**UpperCamelCase__)
    snake_case__ = 1_0
    snake_case__ = self.dummy_model()
    snake_case__ = self.dummy_sample_deter
    scheduler.set_timesteps(UpperCamelCase__)
    for i, t in enumerate(scheduler.timesteps):
        snake_case__ = model(UpperCamelCase__ , UpperCamelCase__)
        snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__).prev_sample
    for i, t in enumerate(scheduler.timesteps):
        snake_case__ = model(UpperCamelCase__ , UpperCamelCase__)
        snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__).prev_sample
    return sample
def test_step_shape(self):
    """Check that ``scheduler.step`` preserves the sample shape and produces
    shape-consistent outputs for two successive timesteps.

    Each pair of steps is executed twice because the scheduler accumulates a
    residual history across calls.
    """
    kwargs = dict(self.forward_default_kwargs)
    num_inference_steps = kwargs.pop("num_inference_steps", None)

    for scheduler_class in self.scheduler_classes:
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        sample = self.dummy_sample
        residual = 0.1 * sample

        if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
            scheduler.set_timesteps(num_inference_steps)
        elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
            scheduler.num_inference_steps = num_inference_steps

        # copy over dummy past residuals (must be done after set_timesteps)
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        # NOTE(review): `ets` is the residual-history attribute of this
        # scheduler family — confirm the attribute name against the scheduler.
        scheduler.ets = dummy_past_residuals[:]

        time_step_0 = scheduler.timesteps[5]
        time_step_1 = scheduler.timesteps[6]

        output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
        output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

        output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
        output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)
def test_timesteps(self):
    """Exercise ``check_over_configs`` for several training-timestep counts."""
    for timesteps in [100, 1000]:
        self.check_over_configs(num_train_timesteps=timesteps, time_step=None)
def test_inference_steps(self):
    """Exercise ``check_over_forward`` for several inference-step counts."""
    for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
        self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)
def test_full_loop_no_noise(self):
    """Regression check: the mean absolute value of the final sample must match
    the recorded reference magnitude for this scheduler (tolerance 10)."""
    sample = self.full_loop()
    result_mean = torch.mean(torch.abs(sample))
    assert abs(result_mean.item() - 2540529) < 10
| 654 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger plus the map of released checkpoints to their config
# URLs (the second assignment previously clobbered the logger).
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/swinv2-tiny-patch4-window8-256': (
        'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
    ),
}
class UpperCAmelCase_ ( snake_case ):
    """Configuration for a Swin Transformer V2 (`swinv2`) model.

    Holds the architectural hyper-parameters (image/patch size, per-stage
    depths and head counts, window size) and regularization settings. The
    defaults reproduce the microsoft/swinv2-tiny-patch4-window8-256 checkpoint.
    """

    model_type = "swinv2"
    # Map generic config attribute names onto the Swin-specific ones.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 76 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _lowerCAmelCase ( lowercase_ ):
    """Agent tool that produces a binary segmentation mask for an image given a
    text label, backed by the CLIPSeg model."""

    description = (
        '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
        '''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
    )
    default_checkpoint = '''CIDAS/clipseg-rd64-refined'''
    name = '''image_segmenter'''
    model_class = CLIPSegForImageSegmentation

    inputs = ['''image''', '''text''']
    outputs = ['''image''']

    def __init__(self, *args, **kwargs):
        # Fail early with a clear message when PIL is not installed.
        requires_backends(self, ["""vision"""])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        """Tokenize the label and preprocess the image into model inputs."""
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="""pt""")

    def forward(self, inputs):
        """Run CLIPSeg and return the raw segmentation logits."""
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        """Binarize the logits and render them as an 8-bit grayscale mask."""
        array = outputs.cpu().detach().numpy()
        # Threshold at zero: positive logits belong to the labeled region.
        array[array <= 0] = 0
        array[array > 0] = 1
        # np.uint8 (the original `np.uinta` does not exist) keeps values 0/255.
        return Image.fromarray((array * 255).astype(np.uint8))
| 654 | 0 |
"""simple docstring"""
from __future__ import annotations
import time
# A path is a list of (row, col) coordinates.
Path = list[tuple[int, int]]

# Search grid: 0 marks a free cell, 1 marks an obstacle.
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    """Search-tree node: a grid position, the goal position, and a parent link
    used to retrace the path once the goal is reached."""

    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        # Positions are stored as (row, col) == (y, x).
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    """Unidirectional breadth-first search over the module-level ``grid``.

    Coordinates are (row, col) tuples; ``delta`` supplies the four
    von-Neumann moves.
    """

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        # Node() takes (pos_x, pos_y, goal_x, goal_y, parent); the incoming
        # tuples are (row, col) == (y, x), hence the index swap.
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        """Pop nodes FIFO until the target is found; return the path, or the
        start position alone when the target is unreachable."""
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the in-bounds, unblocked neighbors of ``parent``."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent))
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Walk parent links back to the start and return the positions in
        start-to-``node`` order."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    """Two BFS passes run in lock-step (start→goal and goal→start) that stop as
    soon as their frontiers meet."""

    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        """Advance both searches one node at a time; on meeting, stitch the two
        partial paths. Falls back to the start position when no connection is
        found."""
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)

            # Retarget each search at the other's current frontier node so the
            # two frontiers steer toward one another.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        """Join the forward path with the reversed backward path, dropping the
        duplicated meeting cell from the backward half."""
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()

    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    # Time the unidirectional search.
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("""Unidirectional BFS computation time : """, bfs_time)

    # Time the bidirectional search on the same problem.
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 77 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    """Fixture helper holding the knobs used by the LayoutLMv3 image-processor
    tests (batch shape, resize target, OCR toggle).

    The class name matters: the test suite below instantiates it as
    ``LayoutLMvaImageProcessingTester`` in its setUp.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        # Default resize target mirrors the processor's own default shape.
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """Constructor kwargs for the image processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCAmelCase ( lowercase_ , unittest.TestCase ):
    """Test suite for the LayoutLMv3 image processor: resizing of PIL/numpy/
    torch inputs and (optionally) Tesseract OCR that returns words plus
    normalized bounding boxes.

    NOTE(review): identifiers in this block look machine-mangled — every
    ``def __magic_name__`` overrides the previous one at class-creation time,
    and several ``snake_case__`` locals are assigned but then read under other
    names (e.g. ``image_processor``, ``image_processing``). The code is kept
    byte-identical here; confirm the intended names against the upstream test
    file before relying on this suite.
    """

    # Processor under test; None makes the mixin skip when pytesseract is absent.
    _lowercase : str = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def __magic_name__ ( self : Optional[int]):
        '''Build the shared tester fixture (presumably stored as
        ``self.image_processor_tester`` originally — TODO confirm).'''
        snake_case__ = LayoutLMvaImageProcessingTester(self)

    @property
    def __magic_name__ ( self : Tuple):
        '''Image-processor constructor kwargs produced by the tester.'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def __magic_name__ ( self : List[Any]):
        '''The processor exposes do_resize, size and apply_ocr attributes.'''
        snake_case__ = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(UpperCamelCase__ , """do_resize"""))
        self.assertTrue(hasattr(UpperCamelCase__ , """size"""))
        self.assertTrue(hasattr(UpperCamelCase__ , """apply_ocr"""))

    def __magic_name__ ( self : Optional[int]):
        '''from_dict honors defaults and keyword overrides for ``size``.'''
        snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8})
        snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2)
        self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2})

    def __magic_name__ ( self : List[str]):
        '''Intentionally empty placeholder (inherited test disabled).'''
        pass

    def __magic_name__ ( self : List[str]):
        '''Resizing of PIL images, single and batched; OCR output is attached.'''
        snake_case__ = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , Image.Image)
        # Test not batched input
        snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""")
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        self.assertIsInstance(encoding.words , UpperCamelCase__)
        self.assertIsInstance(encoding.boxes , UpperCamelCase__)
        # Test batched
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )

    def __magic_name__ ( self : List[Any]):
        '''Resizing of numpy arrays, single and batched.'''
        snake_case__ = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , np.ndarray)
        # Test not batched input
        snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )

    def __magic_name__ ( self : Dict):
        '''Resizing of torch tensors, single and batched.'''
        snake_case__ = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , torch.Tensor)
        # Test not batched input
        snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )

    def __magic_name__ ( self : Any):
        '''Integration test on a real document image: OCR words/boxes must match
        the values recorded with Tesseract 4.1.1, and apply_ocr=False must be
        honored.'''
        snake_case__ = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        snake_case__ = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""")
        snake_case__ = Image.open(ds[0]["""file"""]).convert("""RGB""")
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""")
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4))
        self.assertEqual(len(encoding.words) , len(encoding.boxes))
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        snake_case__ = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", 
        """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
        snake_case__ = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], 
        [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 
        8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , UpperCamelCase__)
        self.assertListEqual(encoding.boxes , UpperCamelCase__)
        # with apply_OCR = False
        snake_case__ = LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__)
        snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""")
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4))
| 654 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    # Extract every other encoder layer of a teacher BertForMaskedLM and remap
    # the parameter names onto the DistilBERT student layout, then save the
    # resulting state dict for transfer-learned distillation.
    parser = argparse.ArgumentParser(
        description=(
            'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
            ' Distillation'
        )
    )
    parser.add_argument('--model_type', default='bert', choices=['bert'])
    parser.add_argument('--model_name', default='bert-base-uncased', type=str)
    parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
    parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = 'bert'
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings are copied unchanged.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    # Teacher layers 0, 2, 4, 7, 9, 11 become student layers 0..5, with BERT
    # parameter names remapped to the DistilBERT naming scheme.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    # Masked-LM head.
    compressed_sd['vocab_projector.weight'] = state_dict['cls.predictions.decoder.weight']
    compressed_sd['vocab_projector.bias'] = state_dict['cls.predictions.bias']
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
| 78 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _lowerCAmelCase ( lowercase_ ):
    """Dataset of language-modeling token sequences used for distillation.

    On construction the raw sequences are validated, overly long sequences are
    split, very short ones and unknown-heavy ones are dropped. Items are
    ``(token_ids, length)`` pairs; ``batch_sequences`` is the collate function.
    """

    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Sanity-check that stored lengths match the actual sequences."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Split sequences longer than the model's max input size into chunks,
        re-adding the cls/sep (or bos/eos) markers at each chunk boundary."""
        max_len = self.params.max_model_input_size
        idxs = self.lengths > max_len
        logger.info(F'''Splitting {sum(idxs)} too long sequences.''')

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
        else:
            cls_id, sep_id = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                # max_len - 2 leaves room for the boundary markers on each chunk.
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Drop sequences of 11 tokens or fewer."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''')

    def remove_unknown_sequences(self):
        """Drop sequences in which 50% or more of the tokens are unknown."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["""unk_token"""]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''')

    def print_statistics(self):
        """Log dataset statistics (master process only)."""
        if not self.params.is_master:
            return
        logger.info(F'''{len(self)} sequences''')
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Collate ``(token_ids, length)`` pairs into padded tensors.

        Returns ``(tk_t, lg_t)`` of shapes ``(bs, max_seq_len_)`` and ``(bs,)``.
        """
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["""pad_token"""]
        else:
            pad_idx = self.params.special_tok_ids["""unk_token"""]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 654 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Import structure consumed by _LazyModule below. Fix: the original assigned
# the per-backend module lists to throwaway names and then referenced the
# never-defined `_import_structure`, so the lazy module raised NameError and
# exposed no models.
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}

# Each optional backend is only registered when its dependency is importable.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 79 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def _UpperCAmelCase ( a : str ):
    """Rename one key of an original YOSO checkpoint to its HF
    ``YosoForMaskedLM`` state-dict equivalent.

    Fix: the parameter is ``a`` but the body referenced the unbound name
    ``orig_key`` (NameError). The substring checks are ordered so compound
    names are rewritten before their bare substrings (``norm1``/``norm2``
    before ``norm``, ``mha.attn`` before ``mha``, ``ff1``/``ff2`` before
    ``ff``, ``mlm_class`` before ``mlm``).
    """
    orig_key = a
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        # "transformer_<n>. ..." -> "encoder.layer.<n>. ..."
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(F'''transformer_{layer_num}''', F'''encoder.layer.{layer_num}''')
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key
def _UpperCAmelCase ( a : Tuple , a : Dict ):
    # NOTE(review): this converter helper is machine-mangled and cannot run:
    #   * the parameter name `a` is declared twice (SyntaxError in Python);
    #   * `orig_state_dict` and `val` are referenced but never bound here;
    #   * `orig_state_dict.pop(a )` presumably should pop `key`, the loop
    #     variable — TODO confirm against the upstream YOSO conversion script;
    #   * the bare `snake_case__ = ...` assignments discard their results —
    #     upstream they write the renamed key back into the state dict and set
    #     the "cls.predictions.bias" / position-ids entries.
    # Kept byte-identical; a fix also needs the sibling key-rename helper,
    # which this file defines under the same (shadowed) name `_UpperCAmelCase`.
    for key in orig_state_dict.copy().keys():
        snake_case__ = orig_state_dict.pop(a )
        # Pooler / sentence-classification weights are intentionally skipped.
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            snake_case__ = val
    snake_case__ = orig_state_dict["""cls.predictions.decoder.bias"""]
    snake_case__ = torch.arange(a ).expand((1, -1) ) + 2
    return orig_state_dict
def _UpperCAmelCase ( a : int , a : List[Any] , a : List[Any] ):
    # NOTE(review): mangled — the parameter name `a` appears three times
    # (SyntaxError). Upstream this is
    # convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path),
    # and each use of `a` below refers to a different one of those parameters;
    # `config`, `model` and `pytorch_dump_path` are also unbound here because
    # the assignment targets were collapsed to `snake_case__`. The runtime
    # string also carries the upstream typo "successfuly". Kept byte-identical
    # pending a coordinated rename.
    snake_case__ = torch.load(a , map_location="""cpu""" )["""model_state_dict"""]
    snake_case__ = YosoConfig.from_json_file(a )
    snake_case__ = YosoForMaskedLM(a )
    snake_case__ = convert_checkpoint_helper(config.max_position_embeddings , a )
    print(model.load_state_dict(a ) )
    model.eval()
    model.save_pretrained(a )
    print(F'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' )
if __name__ == "__main__":
    # NOTE(review): mangled entry point — the parser is bound to `a__` but then
    # used as `parser`, `a__` is rebound to the parsed args which are read as
    # `args`, and `convert_yoso_checkpoint` does not exist under that name in
    # this file (the converters were renamed `_UpperCAmelCase`). Raises
    # NameError as written; kept byte-identical.
    a__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The json file for YOSO model config.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    a__ = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 654 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Any = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ):
    """ConvNeXt V2 model configuration.

    Fix: the obfuscated original declared every ``__init__`` parameter under
    the same name (a SyntaxError) and assigned all attributes to a throwaway
    local; the parameter names below are restored from the body's own
    references, and attributes are set on ``self`` as the config machinery
    requires. Unknown kwargs are forwarded to the parent config.
    """

    # Registered model-type identifier used by the auto-config machinery.
    model_type = 'convnextv2'

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        # Defaults reproduce the "tiny" layout when not given explicitly.
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )]
        # Validate/align the requested backbone outputs against the stages.
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 80 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( lowercase_ ):
    """Read the contents of a compressed file as a filesystem with exactly one
    file inside.

    Fix: the obfuscated original declared duplicate parameter names
    (SyntaxError), collapsed the four class attributes and all four method
    names into single identifiers (later definitions shadowing earlier ones),
    and discarded the assignment targets fsspec relies on. Names below are
    restored to the fsspec subclass interface (`_strip_protocol`, `_get_dirs`,
    `cat`, `_open`) evidenced by the surviving comments and body references.
    """

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="""rb""",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                """requote_redirect_url""": False,  # see https://github.com/huggingface/datasets/pull/5459
                """trust_env""": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("""client_kwargs""", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("""::""")[0])
        # Strip the compression extension to name the single inner file.
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(""".""")]
            if """.""" in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("""/""")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), """name""": self.uncompressed_name}
            self.dir_cache = {f["""name"""]: f}

    def cat(self, path: str):
        # There is only one file; any path resolves to its full contents.
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''')
        return self.file.open()
# NOTE(review): every filesystem class in this file carries the obfuscated
# name `_lowerCAmelCase`, so later definitions shadow earlier ones at module
# level; the class names are left unchanged to preserve the interface.
class _lowerCAmelCase ( lowercase_ ):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    # Fix: the three attributes were all named `_lowercase`, so only the last
    # survived; fsspec needs distinct protocol/compression/extension names.
    protocol = '''bz2'''
    compression = '''bz2'''
    extension = '''.bz2'''


class _lowerCAmelCase ( lowercase_ ):
    """Read contents of a gzip file as a filesystem with one file inside."""

    protocol = '''gzip'''
    compression = '''gzip'''
    extension = '''.gz'''


class _lowerCAmelCase ( lowercase_ ):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = '''lz4'''
    compression = '''lz4'''
    extension = '''.lz4'''


class _lowerCAmelCase ( lowercase_ ):
    """Read contents of an XZ file as a filesystem with one file inside."""

    protocol = '''xz'''
    compression = '''xz'''
    extension = '''.xz'''
class _lowerCAmelCase ( lowercase_ ):
    """Read contents of a zstd-compressed file as a filesystem with one file
    inside.

    Fix: the obfuscated original declared duplicate parameter names
    (SyntaxError), flattened the ``WrappedFile`` helper out of ``__init__``
    (where the surviving ``return WrappedFile(...)`` reference shows it
    belongs), and discarded the ``self.file.__enter__ = fixed_enter``
    assignment that the whole workaround exists for.
    """

    protocol = '''zstd'''
    compression = '''zstd'''
    extension = '''.zst'''

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs, )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        # out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            """Proxy forwarding everything to the wrapped file object while
            owning its own __enter__/__exit__, sidestepping the read-only
            ``close`` attribute of zstd's decompression reader."""

            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
| 654 | 0 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_snake_case : List[str] = logging.get_logger(__name__)
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Rewrite ``<name>.<digits>`` segments of a checkpoint key as
    ``<name>_<digits>`` (e.g. ``"down_blocks.0.resnets.1"`` ->
    ``"down_blocks_0.resnets_1"``), matching how Flax names modules created
    in a loop.

    Fix: the obfuscated original passed the key itself as the regex to
    ``re.findall`` and as the old substring to ``str.replace``, so no rename
    ever happened; the regex and per-match replacement are restored.
    """
    key = __lowerCamelCase
    regex = R"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def lowerCAmelCase_ ( pt_tuple_key , pt_tensor , random_flax_state_dict ):
    """Rename a flattened PyTorch key (tuple of path parts) to its Flax
    equivalent and transpose the tensor where the layouts differ.

    Fix: the obfuscated original declared the same parameter name three times
    (SyntaxError); the names restored here are exactly the ones the body
    already references. Checks run from most to least specific and the first
    match returns.
    """
    # LayerNorm-style "bias" that actually corresponds to a Flax "scale".
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer: 4-D weights go from PyTorch OIHW to Flax HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer: 2-D weight is transposed
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=4_2 ):
    # NOTE(review): mangled — `__lowerCamelCase` is declared three times
    # (SyntaxError); the body's `pt_state_dict` / `flax_model` usages show the
    # intended signature was (pt_state_dict, flax_model, init_key=42). Several
    # call arguments below were also collapsed to `__lowerCamelCase` (they
    # should be the seed, the random flax params, pt_key, the
    # (pt_tuple_key, pt_tensor, random_flax_state_dict) triple, flax_tensor and
    # flax_state_dict respectively). A standalone fix is blocked because this
    # file renamed both helper functions above to the same shadowed name
    # `lowerCAmelCase_`. Kept byte-identical.
    # Step 1: Convert pytorch tensor to numpy
    __snake_case : str = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    __snake_case : str = flax_model.init_weights(PRNGKey(__lowerCamelCase ) )
    __snake_case : Union[str, Any] = flatten_dict(__lowerCamelCase )
    __snake_case : List[Any] = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        __snake_case : List[str] = rename_key(__lowerCamelCase )
        __snake_case : Optional[int] = tuple(renamed_pt_key.split("." ) )
        # Correctly rename weight parameters
        __snake_case , __snake_case : List[str] = rename_key_and_reshape_tensor(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                    F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
        # also add unexpected weight so that warning is thrown
        __snake_case : Dict = jnp.asarray(__lowerCamelCase )
    return unflatten_dict(__lowerCamelCase )
| 81 |
def _UpperCAmelCase ( a : int ):
    """Return True if ``a`` is 0 or an exact power of two.

    Uses the classic bit trick: a power of two has a single set bit, so
    ``a & (a - 1)`` clears it and yields 0.

    Fix: the obfuscated original named the parameter ``a`` but the body
    referenced the unbound name ``number`` (NameError).

    Raises:
        ValueError: if ``a`` is negative.
    """
    if a < 0:
        raise ValueError("""number must not be negative""")
    return a & (a - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 654 | 0 |
"""simple docstring"""
from collections import deque
class lowercase__ :
    """A single schedulable process used by the MLFQ scheduler below.

    Fix: the obfuscated original declared all three ``__init__`` parameters
    under the same name (SyntaxError); the names are restored from the body's
    own references.
    """

    def __init__( self , process_name : str , arrival_time : int , burst_time : int ) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class lowercase__ :
    """Multi-Level Feedback Queue scheduler.

    The first ``number_of_queues - 1`` levels run round-robin with the given
    time slices; the last level runs first-come-first-served.

    Fix: the obfuscated original declared duplicate ``__init__`` parameters
    (SyntaxError) and collapsed every method name to ``lowercase__``; the
    method names are restored from the surviving internal calls
    (``self.round_robin``/``self.update_waiting_time``/
    ``self.first_come_first_served``) and the ``MLFQ.calculate_*`` calls in the
    demo below. NOTE(review): this class shares its obfuscated name with the
    ``Process`` class above and shadows it at module level.
    """

    def __init__(
        self,
        number_of_queues: int,
        time_slices: "list[int]",
        queue: "deque[Process]",
        current_time: int,
    ) -> None:
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()

    def calculate_sequence_of_finish_queue(self) -> "list[str]":
        """Names of the finished processes in completion order."""
        sequence = []
        for i in range(len(self.finish_queue ) ):
            sequence.append(self.finish_queue[i].process_name )
        return sequence

    def calculate_waiting_time(self, queue: "list[Process]") -> "list[int]":
        """Per-process total time spent waiting in ready queues."""
        waiting_times = []
        for i in range(len(queue ) ):
            waiting_times.append(queue[i].waiting_time )
        return waiting_times

    def calculate_turnaround_time(self, queue: "list[Process]") -> "list[int]":
        """Per-process time from arrival to completion."""
        turnaround_times = []
        for i in range(len(queue ) ):
            turnaround_times.append(queue[i].turnaround_time )
        return turnaround_times

    def calculate_completion_time(self, queue: "list[Process]") -> "list[int]":
        """Per-process completion (or last-interruption) time."""
        completion_times = []
        for i in range(len(queue ) ):
            completion_times.append(queue[i].stop_time )
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: "deque[Process]") -> "list[int]":
        """Remaining burst time of every process in ``queue``."""
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: "Process") -> int:
        """Accumulate the time ``process`` waited since it last ran."""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: "deque[Process]") -> "deque[Process]":
        """Run every remaining process to completion in FIFO order."""
        finished = deque()  # sequence deque of finished process
        while len(ready_queue ) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            # NOTE(review): looks like this should be `= cp.arrival_time`
            # (idle until arrival); `+=` matches the source — confirm upstream.
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp )
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp )
        self.finish_queue.extend(finished )  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue: "deque[Process]", time_slice: int) -> "tuple[deque[Process], deque[Process]]":
        """Run one round-robin cycle with ``time_slice``; unfinished processes
        go back to the end of the queue."""
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue ) ):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            # NOTE(review): same `+=` vs `=` question as in first_come_first_served.
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp )
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp )
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp )
        self.finish_queue.extend(finished )  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> "deque[Process]":
        """Run all levels: round-robin on every queue but the last, then FCFS."""
        # all queues except the last one run the round-robin algorithm
        for i in range(self.number_of_queues - 1 ):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue , self.time_slices[i] )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue )
        return self.finish_queue
if __name__ == "__main__":
    import doctest

    # NOTE(review): this demo block is machine-mangled and cannot run:
    #   * `Process` and `MLFQ` do not exist under those names in this file
    #     (both classes above were renamed to `lowercase__`, the second
    #     shadowing the first);
    #   * the four process variables were collapsed to the single name `Pa`,
    #     so each `deque([Pa, Pa, Pa, Pa])` would hold one process four times;
    #   * `lowerCamelCase` is rebound repeatedly for unrelated values, leaving
    #     `time_slices` / `number_of_queues` / `queue` / `mlfq` unbound.
    # Kept byte-identical pending a coordinated rename.
    lowerCamelCase = Process("""P1""", 0, 53)
    lowerCamelCase = Process("""P2""", 0, 17)
    lowerCamelCase = Process("""P3""", 0, 68)
    lowerCamelCase = Process("""P4""", 0, 24)
    lowerCamelCase = 3
    lowerCamelCase = [17, 25]
    lowerCamelCase = deque([Pa, Pa, Pa, Pa])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"""queue""": deque([Pa, Pa, Pa, Pa])})

    lowerCamelCase = Process("""P1""", 0, 53)
    lowerCamelCase = Process("""P2""", 0, 17)
    lowerCamelCase = Process("""P3""", 0, 68)
    lowerCamelCase = Process("""P4""", 0, 24)
    lowerCamelCase = 3
    lowerCamelCase = [17, 25]
    lowerCamelCase = deque([Pa, Pa, Pa, Pa])
    lowerCamelCase = MLFQ(number_of_queues, time_slices, queue, 0)
    lowerCamelCase = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        F"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        F"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        F"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"
    )
    # print sequence of finished processes
    print(
        F"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
    )
| 82 |
class _lowerCAmelCase :
    """Fenwick-style maximum tree over a fixed-size array.

    NOTE(review): machine-mangled. Every method was renamed to
    ``__magic_name__``, so later defs overwrite earlier ones; the internal
    calls to ``self.get_prev`` / ``self.get_next`` (evidently the intended
    names of the two static helpers) therefore raise AttributeError. Parameter
    names and most assignment targets were also discarded (``size``,
    ``index``, ``left``/``right``, ``current_left_border``, ``result`` are
    referenced but never bound), and the three-argument ``max(...)`` in the
    update method lost its real arguments. Kept byte-identical pending
    recovery of the original implementation.
    """

    def __init__( self : List[Any] , UpperCamelCase__ : int):
        """Intended: store ``size`` and allocate ``self.arr`` / ``self.tree``
        (targets were collapsed to the throwaway ``snake_case__``)."""
        snake_case__ = size
        snake_case__ = [0] * size
        snake_case__ = [0] * size

    @staticmethod
    def __magic_name__ ( UpperCamelCase__ : int):
        """Intended ``get_next(index)``: next tree node responsible for ``index``."""
        return index | (index + 1)

    @staticmethod
    def __magic_name__ ( UpperCamelCase__ : int):
        """Intended ``get_prev(index)``: end of the segment left of ``index``."""
        return (index & (index + 1)) - 1

    def __magic_name__ ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : int):
        """Intended ``update(index, value)`` (duplicate parameter names here
        are a SyntaxError as written)."""
        snake_case__ = value
        while index < self.size:
            snake_case__ = self.get_prev(UpperCamelCase__) + 1
            if current_left_border == index:
                snake_case__ = value
            else:
                snake_case__ = max(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
            snake_case__ = self.get_next(UpperCamelCase__)

    def __magic_name__ ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int):
        """Intended ``query(left, right)``: max of ``arr[left:right]``."""
        right -= 1  # Because of right is exclusive
        snake_case__ = 0
        while left <= right:
            snake_case__ = self.get_prev(UpperCamelCase__)
            if left <= current_left:
                snake_case__ = max(UpperCamelCase__ , self.tree[right])
                snake_case__ = current_left
            else:
                snake_case__ = max(UpperCamelCase__ , self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 654 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class __snake_case :
snake_case__ : Optional[Union[str, Path]] = None
snake_case__ : bool = False
snake_case__ : bool = False
snake_case__ : bool = False
snake_case__ : Optional[Dict] = None
snake_case__ : Optional[str] = None
snake_case__ : bool = False
snake_case__ : bool = False
snake_case__ : bool = False
snake_case__ : bool = True
snake_case__ : Optional[int] = None
snake_case__ : int = 1
snake_case__ : Optional[Union[str, bool]] = None
snake_case__ : bool = False
snake_case__ : Optional[Dict] = None
snake_case__ : Optional[str] = None
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(__lowerCAmelCase ) for k, v in self.__dict__.items()} )
| 83 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _lowerCAmelCase :
    """Builds tiny Pegasus configs and inputs for the TF tests below.

    Fix: the obfuscated original declared every ``__init__`` parameter under
    the same name (SyntaxError), collapsed the three class attributes into one
    identifier, and discarded several assignment targets; names are restored
    from the body's own references (``self.config_cls``, ``self.config_updates``)
    and the HF TF-tester conventions (``prepare_config_and_inputs_for_common``,
    ``check_decoder_model_past_large_inputs`` are the names the sibling test
    class invokes).
    """

    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = '''gelu'''

    def __init__(
        self,
        parent,
        batch_size=1_3,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=9_9,
        hidden_size=3_2,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4_0,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return a small random (config, inputs_dict) pair ending each
        encoder sequence with EOS."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Verify cached (past_key_values) decoding matches full re-decoding."""
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["""input_ids"""]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["""attention_mask"""][:1, :]
        head_mask = inputs_dict["""head_mask"""]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1)

        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1E-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the full TF Pegasus input dict, deriving any mask not supplied.

    Fix: the obfuscated original declared the parameter name ``a`` eight times
    (SyntaxError) and dropped the function's real name; the parameter names are
    restored from the body's own references, and the function name from the
    ``prepare_pegasus_inputs_dict(...)`` call site in the tester class above.
    ``tf.inta`` is restored to ``tf.int8``.
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        # First decoder position is always attended; the rest mask out padding.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class _lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
    """TF Pegasus model test-suite shell.

    NOTE(review): machine-mangled — the mixin bases were renamed to the
    undefined name ``lowercase_`` (the imports above provide the intended
    ``TFModelTesterMixin`` and ``PipelineTesterMixin``); every class attribute
    shares the name ``_lowercase`` so only the last survives; the test methods
    are all ``__magic_name__`` so only the last is kept by Python;
    ``TFPegasusModelTester`` does not exist under that name (the tester class
    above is ``_lowerCAmelCase``); and the ``ConfigTester`` call passes the
    undefined ``UpperCamelCase__`` (presumably ``PegasusConfig`` — confirm).
    Kept byte-identical pending a coordinated rename.
    """

    _lowercase : int = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    _lowercase : List[Any] = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    _lowercase : List[Any] = (
        {
            '''conversational''': TFPegasusForConditionalGeneration,
            '''feature-extraction''': TFPegasusModel,
            '''summarization''': TFPegasusForConditionalGeneration,
            '''text2text-generation''': TFPegasusForConditionalGeneration,
            '''translation''': TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    _lowercase : Optional[int] = True
    _lowercase : Dict = False
    _lowercase : Any = False

    def __magic_name__ ( self : str):
        """Intended unittest ``setUp``: build model tester and config tester."""
        snake_case__ = TFPegasusModelTester(self)
        snake_case__ = ConfigTester(self , config_class=UpperCamelCase__)

    def __magic_name__ ( self : List[Any]):
        """Intended ``test_config``."""
        self.config_tester.run_common_tests()

    def __magic_name__ ( self : Optional[int]):
        """Intended ``test_decoder_model_past_large_inputs``."""
        snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__)
@require_sentencepiece
@require_tokenizers
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
    """Slow integration test: run ``google/pegasus-xsum`` summarisation on two reference
    articles and compare the decoded output against stored expected summaries.

    NOTE(review): class attributes were all renamed to ``_lowercase`` by a source
    transform, so only the last binding survives at runtime; upstream these are
    ``src_text`` / ``expected_text`` / ``model_name`` — confirm against the original.
    """

    _lowercase : List[str] = [
        ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
        ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
    ]
    _lowercase : str = [
        '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
        ''' reduce the risk of wildfires.''',
        '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    _lowercase : int = '''google/pegasus-xsum'''

    @cached_property
    def __magic_name__ ( self : Dict):
        '''Tokenizer for the checkpoint under test (cached).'''
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def __magic_name__ ( self : int):
        '''TF seq2seq model loaded from the checkpoint (cached).'''
        snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    def __magic_name__ ( self : Dict , **UpperCamelCase__ : List[Any]):
        '''Generate summaries for the source texts and assert exact match with expectations.'''
        snake_case__ = self.translate_src_text(**UpperCamelCase__)
        assert self.expected_text == generated_words

    def __magic_name__ ( self : str , **UpperCamelCase__ : List[Any]):
        '''Tokenize the source texts, beam-search generate, and decode back to strings.'''
        snake_case__ = self.tokenizer(self.src_text , **UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="""tf""")
        snake_case__ = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCamelCase__ , )
        snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase__)
        return generated_words

    @slow
    def __magic_name__ ( self : List[str]):
        '''End-to-end batch generation must reproduce the stored expected summaries.'''
        self._assert_generated_batch_equal_expected()
| 654 | 0 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCAmelCase = get_logger(__name__)
class A_ :
    '''Mock download manager that resolves dataset download URLs to files inside a local
    or hub-hosted ``dummy_data.zip`` archive, for testing dataset loading scripts
    without real downloads.

    NOTE(review): a source transform renamed all class attributes to ``_UpperCamelCase``
    and all assignment targets to ``lowercase``, so only the last binding of each such
    name survives at runtime and most instance attributes read elsewhere (e.g.
    ``self.dataset_name``, ``self._dummy_file``, ``self.version_name``) are never
    actually bound here. Behaviour notes below describe the apparent upstream intent
    and should be confirmed against the original implementation.
    '''

    # presumably: dummy-data folder name / scripts repo name / a boolean flag — TODO confirm
    _UpperCamelCase : Dict = """dummy_data"""
    _UpperCamelCase : Optional[int] = """datasets"""
    _UpperCamelCase : Tuple = False

    def __init__( self , snake_case , snake_case , snake_case , snake_case = None , snake_case = False , snake_case = True , snake_case = None , ):
        '''Store dataset identity, cache location, config and dummy-data loading options.'''
        lowercase = 0
        lowercase = dataset_name
        lowercase = cache_dir
        lowercase = use_local_dummy_data
        lowercase = config
        # download_callbacks take a single url as input
        lowercase = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        lowercase = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        lowercase = str(snake_case )
        # to be downloaded
        lowercase = None
        lowercase = None

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        '''Lazily download/extract the dummy data once and cache the resulting path.'''
        if self._dummy_file is None:
            lowercase = self.download_dummy_data()
        return self._dummy_file

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        '''Relative dummy-data folder: dummy/[config_name/]version_name.'''
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join('dummy' , self.config.name , self.version_name )
        # structure is dummy / version_name
        return os.path.join('dummy' , self.version_name )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        '''Path of the dummy_data.zip archive inside the dummy-data folder.'''
        return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )

    def SCREAMING_SNAKE_CASE__ ( self ):
        '''Fetch the dummy zip (local path or hub URL) via cached_path and return the extracted path.'''
        lowercase = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        lowercase = cached_path(
            snake_case , cache_dir=self.cache_dir , extract_compressed_file=snake_case , force_extract=snake_case )
        return os.path.join(snake_case , self.dummy_file_name )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        '''Local on-disk path of the dummy zip within the datasets-scripts tree.'''
        return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        '''Hub-hosted URL of the dummy zip (computed lazily; result expected to be cached).'''
        if self._bucket_url is None:
            lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
        return self._bucket_url

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        '''Directory containing the dummy data (the dummy file itself when it is a directory).'''
        # return full path if its a dir
        if os.path.isdir(self.dummy_file ):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case ):
        '''Map a download request (str, list/tuple or dict of URLs) onto dummy-data paths.'''
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            lowercase = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            lowercase = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(snake_case , snake_case ):
            return self.create_dummy_data_dict(snake_case , snake_case )
        elif isinstance(snake_case , (list, tuple) ):
            return self.create_dummy_data_list(snake_case , snake_case )
        else:
            return self.create_dummy_data_single(snake_case , snake_case )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case ):
        '''Downloading with the mock manager is just download_and_extract.'''
        return self.download_and_extract(snake_case )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
        '''Extracting with the mock manager is just download_and_extract.'''
        return self.download_and_extract(snake_case )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case , **snake_case ):
        '''No-op passthrough; NOTE(review): ``path`` is unbound here (mangled parameter names).'''
        return path

    def SCREAMING_SNAKE_CASE__ ( self ):
        '''No recorded sizes/checksums for dummy data.'''
        return {}

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
        '''Build a {key: dummy path} dict mirroring a dict of download URLs.'''
        lowercase = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(snake_case , snake_case ):
                    for single_url in single_urls:
                        download_callback(snake_case )
                else:
                    lowercase = single_urls
                    download_callback(snake_case )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(snake_case , snake_case ):
                lowercase = [os.path.join(snake_case , urllib.parse.quote_plus(Path(snake_case ).name ) ) for x in single_urls]
            else:
                lowercase = single_urls
                lowercase = os.path.join(snake_case , urllib.parse.quote_plus(Path(snake_case ).name ) )
            lowercase = value
        # make sure that values are unique
        if all(isinstance(snake_case , snake_case ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
            dummy_data_dict.values() ):
            # append key to value to make its name unique
            lowercase = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
        '''Build a list of dummy paths mirroring a list/tuple of download URLs.'''
        lowercase = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        lowercase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , snake_case ) ) for url in data_url )
        lowercase = all(
            url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
        if data_url and (is_tf_records or is_pubmed_records):
            # collapse sharded/pubmed URL lists onto the single first shard
            lowercase = [data_url[0]] * len(snake_case )
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(snake_case )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            lowercase = os.path.join(snake_case , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
            dummy_data_list.append(snake_case )
        return dummy_data_list

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
        '''Map a single download URL onto its dummy path (with a backward-compat fallback).'''
        for download_callback in self.download_callbacks:
            download_callback(snake_case )
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        lowercase = os.path.join(snake_case , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
        if os.path.exists(snake_case ) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def SCREAMING_SNAKE_CASE__ ( self ):
        '''No-op: nothing to clean up for dummy data.'''
        pass

    def SCREAMING_SNAKE_CASE__ ( self ):
        '''No-op: no extracted files to manage for dummy data.'''
        pass

    def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
        '''Yield (posix name, binary handle) pairs for files under an archive path.'''
        def _iter_archive_members(snake_case ):
            # this preserves the order of the members inside the ZIP archive
            lowercase = Path(self.dummy_file ).parent
            lowercase = path.relative_to(snake_case )
            with ZipFile(self.local_path_to_dummy_data ) as zip_file:
                lowercase = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix() ):
                    yield dummy_parent_path.joinpath(snake_case )

        lowercase = Path(snake_case )
        lowercase = _iter_archive_members(snake_case ) if self.use_local_dummy_data else path.rglob('*' )
        for file_path in file_paths:
            # skip hidden and dunder files (e.g. __MACOSX artifacts)
            if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
                yield file_path.relative_to(snake_case ).as_posix(), file_path.open('rb' )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
        '''Yield file paths under the given path(s), skipping hidden/dunder files and dirs.'''
        if not isinstance(snake_case , snake_case ):
            lowercase = [paths]
        for path in paths:
            if os.path.isfile(snake_case ):
                if os.path.basename(snake_case ).startswith(('.', '__') ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(snake_case ):
                    if os.path.basename(snake_case ).startswith(('.', '__') ):
                        continue
                    dirnames.sort()
                    for filename in sorted(snake_case ):
                        if filename.startswith(('.', '__') ):
                            continue
                        yield os.path.join(snake_case , snake_case )
| 84 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
a__ = logging.get_logger(__name__)
a__ = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
a__ = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
a__ = {
"""jukebox""": 5_1_2,
}
class _lowerCAmelCase ( lowercase_ ):
    """Tokenizer for OpenAI Jukebox: encodes (artist, genre, lyrics) triples into id
    sequences using three JSON vocabularies (artists, genres, lyric characters), one
    per prior version.

    NOTE(review): a source transform renamed locals to ``snake_case__`` and methods to
    ``__magic_name__``, so only the last binding of each name survives at runtime;
    behaviour notes below are inferred from the apparent upstream intent — confirm.
    """

    _lowercase : str = VOCAB_FILES_NAMES
    _lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    _lowercase : str = PRETRAINED_LYRIC_TOKENS_SIZES
    _lowercase : Any = ['''input_ids''', '''attention_mask''']

    def __init__( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int=["v3", "v2", "v2"] , UpperCamelCase__ : List[str]=5_1_2 , UpperCamelCase__ : Union[str, Any]=5 , UpperCamelCase__ : List[Any]="<|endoftext|>" , **UpperCamelCase__ : List[Any] , ):
        '''Load the three vocab JSON files and build encoder/decoder maps.

        NOTE(review): the mutable list default ``["v3", "v2", "v2"]`` is shared across
        calls — harmless only if never mutated; upstream convention is one version
        string per prior.
        '''
        snake_case__ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else unk_token
        super().__init__(
            unk_token=UpperCamelCase__ , n_genres=UpperCamelCase__ , version=UpperCamelCase__ , max_n_lyric_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
        snake_case__ = version
        snake_case__ = max_n_lyric_tokens
        snake_case__ = n_genres
        with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
            snake_case__ = json.load(UpperCamelCase__)
        with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
            snake_case__ = json.load(UpperCamelCase__)
        with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
            snake_case__ = json.load(UpperCamelCase__)
        # out-of-vocabulary character pattern for lyric text
        snake_case__ = R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 7_9:
            snake_case__ = oov.replace(R"""\-'""" , R"""\-+'""")
        snake_case__ = regex.compile(UpperCamelCase__)
        snake_case__ = {v: k for k, v in self.artists_encoder.items()}
        snake_case__ = {v: k for k, v in self.genres_encoder.items()}
        snake_case__ = {v: k for k, v in self.lyrics_encoder.items()}

    @property
    def __magic_name__ ( self : List[str]):
        '''Total vocabulary size across the three sub-vocabularies.'''
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def __magic_name__ ( self : Union[str, Any]):
        '''Return the vocabulary as a single dict.

        NOTE(review): ``dict(a, b, c)`` with three positional dicts raises TypeError at
        runtime — the intent is presumably ``{**artists, **genres, **lyrics}``; confirm
        against upstream before fixing.
        '''
        return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder)

    def __magic_name__ ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int):
        '''Convert artist names, genre lists and lyric characters to ids (unknown -> 0);
        genre lists are right-padded with -1 up to n_genres.'''
        snake_case__ = [self.artists_encoder.get(UpperCamelCase__ , 0) for artist in list_artists]
        for genres in range(len(UpperCamelCase__)):
            snake_case__ = [self.genres_encoder.get(UpperCamelCase__ , 0) for genre in list_genres[genres]]
            snake_case__ = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        # only the first prior receives lyric ids; the other two get empty lists
        snake_case__ = [[self.lyrics_encoder.get(UpperCamelCase__ , 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : Optional[int]):
        '''Character-level tokenization: split lyrics into a list of characters.'''
        return list(UpperCamelCase__)

    def __magic_name__ ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , **UpperCamelCase__ : List[str]):
        '''Normalise artist/genre/lyrics then tokenize the lyrics.'''
        snake_case__ , snake_case__ , snake_case__ = self.prepare_for_tokenization(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        snake_case__ = self._tokenize(UpperCamelCase__)
        return artist, genre, lyrics

    def __magic_name__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : bool = False):
        '''Version-aware normalisation of artists/genres and OOV stripping of lyrics.'''
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                # v3 vocab is lowercase; genres are kept as a single-element list
                snake_case__ = artists[idx].lower()
                snake_case__ = [genres[idx].lower()]
            else:
                # v2 entries carry a ".v2" suffix and combined genres are split on "_"
                snake_case__ = self._normalize(artists[idx]) + """.v2"""
                snake_case__ = [
                    self._normalize(UpperCamelCase__) + """.v2""" for genre in genres[idx].split("""_""")
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            # v2: fixed 80-char vocabulary built inline
            snake_case__ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""")
            snake_case__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
            snake_case__ = {vocab[index]: index + 1 for index in range(len(UpperCamelCase__))}
            snake_case__ = 0
            snake_case__ = len(UpperCamelCase__) + 1
            snake_case__ = self.vocab
            snake_case__ = {v: k for k, v in self.vocab.items()}
            snake_case__ = """"""
        else:
            snake_case__ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""")
        snake_case__ = self._run_strip_accents(UpperCamelCase__)
        snake_case__ = lyrics.replace("""\\""" , """\n""")
        snake_case__ = self.out_of_vocab.sub("""""" , UpperCamelCase__), [], []
        return artists, genres, lyrics

    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : str):
        '''Strip accents via NFD decomposition (drop combining marks, category Mn).'''
        snake_case__ = unicodedata.normalize("""NFD""" , UpperCamelCase__)
        snake_case__ = []
        for char in text:
            snake_case__ = unicodedata.category(UpperCamelCase__)
            if cat == "Mn":
                continue
            output.append(UpperCamelCase__)
        return "".join(UpperCamelCase__)

    def __magic_name__ ( self : List[str] , UpperCamelCase__ : str):
        '''Normalise a name: keep [a-z0-9.], replace everything else with "_",
        collapse runs of "_" and strip them from the ends.'''
        snake_case__ = (
            [chr(UpperCamelCase__) for i in range(ord("""a""") , ord("""z""") + 1)]
            + [chr(UpperCamelCase__) for i in range(ord("""A""") , ord("""Z""") + 1)]
            + [chr(UpperCamelCase__) for i in range(ord("""0""") , ord("""9""") + 1)]
            + ["""."""]
        )
        snake_case__ = frozenset(UpperCamelCase__)
        snake_case__ = re.compile(R"""_+""")
        snake_case__ = """""".join([c if c in accepted else """_""" for c in text.lower()])
        snake_case__ = pattern.sub("""_""" , UpperCamelCase__).strip("""_""")
        return text

    def __magic_name__ ( self : List[Any] , UpperCamelCase__ : List[str]):
        '''Join lyric tokens back into a string.

        NOTE(review): joins with a space rather than "" — verify against upstream,
        since lyric tokens are single characters.'''
        return " ".join(UpperCamelCase__)

    def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : bool = False):
        '''Convert nested python lists to tensors of the requested framework
        (TF / PyTorch / JAX / NumPy), optionally prepending a batch axis.'''
        if not isinstance(UpperCamelCase__ , UpperCamelCase__):
            snake_case__ = TensorType(UpperCamelCase__)
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    """Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""")
            import tensorflow as tf

            snake_case__ = tf.constant
            snake_case__ = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""")
            import torch

            snake_case__ = torch.tensor
            snake_case__ = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""")
            import jax.numpy as jnp  # noqa: F811

            snake_case__ = jnp.array
            snake_case__ = _is_jax
        else:
            snake_case__ = np.asarray
            snake_case__ = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                snake_case__ = [inputs]
            if not is_tensor(UpperCamelCase__):
                snake_case__ = as_tensor(UpperCamelCase__)
        except:  # noqa E722
            raise ValueError(
                """Unable to create tensor, you should probably activate truncation and/or padding """
                """with 'padding=True' 'truncation=True' to have batched tensors with the same length.""")
        return inputs

    def __call__( self : str , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any="" , UpperCamelCase__ : Dict="pt"):
        '''Tokenize (artist, genres, lyrics), build one id tensor per prior version and
        return a BatchEncoding with input_ids and (all -inf) attention masks.'''
        snake_case__ = [0, 0, 0]
        snake_case__ = [artist] * len(self.version)
        snake_case__ = [genres] * len(self.version)
        snake_case__ , snake_case__ , snake_case__ = self.tokenize(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        snake_case__ , snake_case__ , snake_case__ = self._convert_token_to_id(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        snake_case__ = [-INFINITY] * len(full_tokens[-1])
        snake_case__ = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=UpperCamelCase__)
            for i in range(len(self.version))
        ]
        return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks})

    def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None):
        '''Write the three vocabularies to JSON files in save_directory and return their paths.'''
        if not os.path.isdir(UpperCamelCase__):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        snake_case__ = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""])
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.artists_encoder , ensure_ascii=UpperCamelCase__))
        snake_case__ = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""])
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.genres_encoder , ensure_ascii=UpperCamelCase__))
        snake_case__ = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""])
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.lyrics_encoder , ensure_ascii=UpperCamelCase__))
        return (artists_file, genres_file, lyrics_file)

    def __magic_name__ ( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str]):
        '''Decode artist/genre/lyric ids back to their string tokens.'''
        snake_case__ = self.artists_decoder.get(UpperCamelCase__)
        snake_case__ = [self.genres_decoder.get(UpperCamelCase__) for genre in genres_index]
        snake_case__ = [self.lyrics_decoder.get(UpperCamelCase__) for character in lyric_index]
        return artist, genres, lyrics
| 654 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case ( UpperCamelCase_ ):
    """Unconditional image-generation pipeline using score-based SDE-VE sampling:
    predictor–corrector loop driven by a U-Net score model and a ScoreSdeVeScheduler.

    NOTE(review): both class attributes are named ``lowercase_`` (mangled), so only the
    second binding survives — upstream these annotate the unet and scheduler fields.
    """

    lowercase_ = 42
    lowercase_ = 42

    def __init__( self : str , a_ : UNetaDModel , a_ : ScoreSdeVeScheduler )-> List[Any]:
        """Register the U-Net and scheduler on the pipeline.

        NOTE(review): both parameters are named ``a_`` (mangled), so the same object is
        passed for unet and scheduler as written — confirm against upstream.
        """
        super().__init__()
        self.register_modules(unet=a_ , scheduler=a_ )

    @torch.no_grad()
    def __call__( self : Optional[int] , a_ : int = 1 , a_ : int = 2000 , a_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a_ : Optional[str] = "pil" , a_ : bool = True , **a_ : int , )-> Union[ImagePipelineOutput, Tuple]:
        """Sample images: start from scaled Gaussian noise, then for each timestep run
        `correct_steps` Langevin corrector updates followed by one predictor step;
        clamp the mean sample to [0, 1] and return NHWC numpy or PIL images."""
        SCREAMING_SNAKE_CASE__ : Tuple = self.unet.config.sample_size
        SCREAMING_SNAKE_CASE__ : Tuple = (batch_size, 3, img_size, img_size)
        SCREAMING_SNAKE_CASE__ : Tuple = self.unet
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = randn_tensor(a_ , generator=a_ ) * self.scheduler.init_noise_sigma
        SCREAMING_SNAKE_CASE__ : int = sample.to(self.device )

        self.scheduler.set_timesteps(a_ )
        self.scheduler.set_sigmas(a_ )

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            SCREAMING_SNAKE_CASE__ : List[Any] = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )

            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                SCREAMING_SNAKE_CASE__ : Optional[int] = self.unet(a_ , a_ ).sample
                SCREAMING_SNAKE_CASE__ : List[str] = self.scheduler.step_correct(a_ , a_ , generator=a_ ).prev_sample

            # prediction step
            SCREAMING_SNAKE_CASE__ : Any = model(a_ , a_ ).sample
            SCREAMING_SNAKE_CASE__ : str = self.scheduler.step_pred(a_ , a_ , a_ , generator=a_ )

            SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = output.prev_sample, output.prev_sample_mean

        # report the (less noisy) mean of the final predictor step as the output image
        SCREAMING_SNAKE_CASE__ : Dict = sample_mean.clamp(0 , 1 )
        SCREAMING_SNAKE_CASE__ : int = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            SCREAMING_SNAKE_CASE__ : str = self.numpy_to_pil(a_ )

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=a_ )
| 85 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class _lowerCAmelCase :
    """Resize images so the shorter edge matches a target length sampled from a
    ``[min, max]`` range, capping the longer edge at ``max_size``
    (detectron2-style ResizeShortestEdge).

    NOTE(review): attribute/local names were mangled (``snake_case__``); the attributes
    read in ``__call__`` (interp_method, max_size, short_edge_length) are presumably the
    three values stored in ``__init__`` — confirm against upstream.
    """

    def __init__( self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str]=sys.maxsize):
        '''Store interpolation mode, max long-edge size and the short-edge range.'''
        snake_case__ = """bilinear"""
        snake_case__ = max_size
        snake_case__ = short_edge_length

    def __call__( self : List[str] , UpperCamelCase__ : Tuple):
        '''Resize each image in the list and return the resized list.

        uint8 arrays go through PIL bilinear resize; float tensors through
        ``torch.nn.functional.interpolate``.'''
        snake_case__ = []
        for img in imgs:
            snake_case__ , snake_case__ = img.shape[:2]
            # later: provide list and randomly choose index for resize
            snake_case__ = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1)
            if size == 0:
                return img
            snake_case__ = size * 1.0 / min(UpperCamelCase__ , UpperCamelCase__)
            if h < w:
                snake_case__ , snake_case__ = size, scale * w
            else:
                snake_case__ , snake_case__ = scale * h, size
            # shrink further if the long edge exceeds max_size
            if max(UpperCamelCase__ , UpperCamelCase__) > self.max_size:
                snake_case__ = self.max_size * 1.0 / max(UpperCamelCase__ , UpperCamelCase__)
                snake_case__ = newh * scale
                snake_case__ = neww * scale
            snake_case__ = int(neww + 0.5)
            snake_case__ = int(newh + 0.5)
            if img.dtype == np.uinta:
                snake_case__ = Image.fromarray(UpperCamelCase__)
                snake_case__ = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR)
                snake_case__ = np.asarray(UpperCamelCase__)
            else:
                snake_case__ = img.permute(2 , 0 , 1).unsqueeze(0)  # 3, 0, 1)  # hw(c) -> nchw
                snake_case__ = nn.functional.interpolate(
                    UpperCamelCase__ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase__).squeeze(0)
            img_augs.append(UpperCamelCase__)
        return img_augs
class _lowerCAmelCase :
    """Image pre-processor for the FRCNN visual backbone: resize shortest edge,
    normalise with pixel mean/std, pad to a common batch size and return the padded
    images plus their pre-padding sizes and resize scale factors.

    NOTE(review): attribute/local names were mangled (``snake_case__``); the attributes
    read below (aug, pad_value, size_divisibility, device, pixel_mean, pixel_std,
    normalizer, input_format) are presumably the values stored in ``__init__``.
    """

    def __init__( self : Dict , UpperCamelCase__ : Optional[int]):
        '''Build the resizer and normalisation constants from a detectron-style config.'''
        snake_case__ = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST)
        snake_case__ = cfg.INPUT.FORMAT
        snake_case__ = cfg.SIZE_DIVISIBILITY
        snake_case__ = cfg.PAD_VALUE
        snake_case__ = cfg.INPUT.MAX_SIZE_TEST
        snake_case__ = cfg.MODEL.DEVICE
        # mean/std reshaped to (C, 1, 1) so they broadcast over (C, H, W) images
        snake_case__ = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        snake_case__ = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        snake_case__ = lambda UpperCamelCase__: (x - self.pixel_mean) / self.pixel_std

    def __magic_name__ ( self : Dict , UpperCamelCase__ : Dict):
        '''Zero/value-pad all images to the max H/W in the batch; return the stacked
        batch tensor and the original (pre-padding) sizes.'''
        snake_case__ = tuple(max(UpperCamelCase__) for s in zip(*[img.shape for img in images]))
        snake_case__ = [im.shape[-2:] for im in images]
        snake_case__ = [
            nn.functional.pad(
                UpperCamelCase__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
            for size, im in zip(UpperCamelCase__ , UpperCamelCase__)
        ]
        return torch.stack(UpperCamelCase__), torch.tensor(UpperCamelCase__)

    def __call__( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : str=False):
        '''Normalise, resize and pad a list of images (or one image if single_image);
        returns (images, sizes, yx scale factors).'''
        with torch.no_grad():
            if not isinstance(UpperCamelCase__ , UpperCamelCase__):
                snake_case__ = [images]
            if single_image:
                assert len(UpperCamelCase__) == 1
            for i in range(len(UpperCamelCase__)):
                # move tensors to the target device; load/convert anything else first
                if isinstance(images[i] , torch.Tensor):
                    images.insert(UpperCamelCase__ , images.pop(UpperCamelCase__).to(self.device).float())
                elif not isinstance(images[i] , torch.Tensor):
                    images.insert(
                        UpperCamelCase__ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase__) , input_format=self.input_format))
                        .to(self.device)
                        .float() , )
            # resize smallest edge
            snake_case__ = torch.tensor([im.shape[:2] for im in images])
            snake_case__ = self.aug(UpperCamelCase__)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            snake_case__ = [self.normalizer(UpperCamelCase__) for x in images]
            # now pad them to do the following operations
            snake_case__ , snake_case__ = self.pad(UpperCamelCase__)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            snake_case__ = torch.true_divide(UpperCamelCase__ , UpperCamelCase__)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _UpperCAmelCase ( a : Optional[Any] , a : Any ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _UpperCAmelCase ( a : Any , a : Tuple[int, int] ):
assert torch.isfinite(a ).all(), "Box tensor contains infinite or NaN!"
snake_case__ , snake_case__ = box_size
tensor[:, 0].clamp_(min=0 , max=a )
tensor[:, 1].clamp_(min=0 , max=a )
tensor[:, 2].clamp_(min=0 , max=a )
tensor[:, 3].clamp_(min=0 , max=a )
| 654 | 0 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__a :List[str] = logging.get_logger(__name__)
__a :Optional[int] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__a :Any = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ):
    """Copy one fairseq weight tensor into the HF model attribute addressed by a
    dotted key, after verifying shapes match.

    NOTE(review): a source transform renamed all five parameters to ``__UpperCamelCase``
    and the per-type assignment targets (upstream ``hf_pointer.weight.data = value``
    etc.) to bare ``A_`` bindings, so as written this function walks/validates/logs but
    never writes into the model — confirm against the upstream conversion script.
    """
    # walk the dotted key to the target sub-module/parameter
    for attribute in key.split("." ):
        A_ = getattr(__UpperCamelCase ,__UpperCamelCase )
    if weight_type is not None:
        A_ = getattr(__UpperCamelCase ,__UpperCamelCase ).shape
    else:
        A_ = hf_pointer.shape
    # shape check before any copy: fail loudly on architecture mismatch
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    # dispatch on which field of the pointer receives the value
    if weight_type == "weight":
        A_ = value
    elif weight_type == "weight_g":
        A_ = value
    elif weight_type == "weight_v":
        A_ = value
    elif weight_type == "bias":
        A_ = value
    else:
        A_ = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Dict ):
    """Walk a fairseq wav2vec2 state dict and route each tensor into the HF model:
    feature-extractor conv layers via load_conv_layer, the encoder->decoder projection
    specially, and everything else via the MAPPING table; returns the projection weight.

    NOTE(review): parameters were both renamed to ``__UpperCamelCase`` and locals to
    ``A_`` by a source transform, so successive bindings shadow each other — the notes
    below describe the apparent upstream intent; confirm before relying on them.
    """
    A_ = []
    A_ = fairseq_model.state_dict()
    A_ = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    A_ = None
    for name, value in fairseq_dict.items():
        A_ = False
        if "conv_layers" in name:
            # convolutional feature extractor weights get their own loader
            load_conv_layer(
                __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,hf_model.config.feat_extract_norm == "group" ,)
            A_ = True
        elif name.split("." )[0] == "proj":
            # encoder-to-decoder projection is captured and returned, not mapped
            A_ = fairseq_model.proj
            A_ = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    A_ = True
                    if "*" in mapped_key:
                        # substitute the layer index for the wildcard in the mapped key
                        A_ = name.split(__UpperCamelCase )[0].split("." )[-2]
                        A_ = mapped_key.replace("*" ,__UpperCamelCase )
                    if "weight_g" in name:
                        A_ = "weight_g"
                    elif "weight_v" in name:
                        A_ = "weight_v"
                    elif "bias" in name:
                        A_ = "bias"
                    elif "weight" in name:
                        A_ = "weight"
                    else:
                        A_ = None
                    set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
                continue
        if not is_used:
            unused_weights.append(__UpperCamelCase )
    logger.warning(f'''Unused weights: {unused_weights}''' )
    return proj_weight
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : Any ):
"""simple docstring"""
A_ = full_name.split("conv_layers." )[-1]
A_ = name.split("." )
A_ = int(items[0] )
A_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
A_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ , A_ = emb.weight.shape
A_ = nn.Linear(__UpperCamelCase ,__UpperCamelCase ,bias=__UpperCamelCase )
A_ = emb.weight.data
return lin_layer
def __snake_case ( __UpperCamelCase : Tuple ):
"""simple docstring"""
with open(__UpperCamelCase ,"r" ,encoding="utf-8" ) as f:
A_ = f.readlines()
A_ = [line.split(" " )[0] for line in lines]
A_ = len(__UpperCamelCase )
A_ = {
"<s>": 0,
"<pad>": 1,
"</s>": 2,
"<unk>": 3,
}
vocab_dict.update(dict(zip(__UpperCamelCase ,range(4 ,num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Dict ,):
"""simple docstring"""
A_ = WavaVecaConfig.from_pretrained(__UpperCamelCase )
A_ = SpeechaTextaConfig.from_pretrained(
__UpperCamelCase ,vocab_size=__UpperCamelCase ,decoder_layers=__UpperCamelCase ,do_stable_layer_norm=__UpperCamelCase )
A_ = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=1_6000 ,padding_value=0 ,do_normalize=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,)
A_ , A_ , A_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
A_ = model[0].eval()
# set weights for wav2vec2 encoder
A_ = WavaVecaModel(__UpperCamelCase )
A_ = recursively_load_weights_wavaveca(model.encoder ,__UpperCamelCase )
A_ = SpeechaTextaForCausalLM(__UpperCamelCase )
A_ , A_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() ,strict=__UpperCamelCase )
# set output linear layer
unexpected_keys.remove("embed_out" )
A_ = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
A_ = SpeechEncoderDecoderModel(encoder=__UpperCamelCase ,decoder=__UpperCamelCase )
A_ = False
# add projection layer
A_ = nn.Parameter(projection_layer.weight )
A_ = nn.Parameter(projection_layer.bias )
A_ = create_vocab_dict(__UpperCamelCase )
with open(os.path.join(__UpperCamelCase ,"vocab.json" ) ,"w" ) as fp:
json.dump(__UpperCamelCase ,__UpperCamelCase )
A_ = SpeechaTextaTokenizer(os.path.join(__UpperCamelCase ,"vocab.json" ) )
tokenizer.save_pretrained(__UpperCamelCase )
A_ = hf_wavavec.config.to_dict()
A_ = tokenizer.pad_token_id
A_ = tokenizer.bos_token_id
A_ = tokenizer.eos_token_id
A_ = "speech_to_text_2"
A_ = "wav2vec2"
A_ = SpeechEncoderDecoderConfig.from_dict(__UpperCamelCase )
hf_wavavec.save_pretrained(__UpperCamelCase )
feature_extractor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :int = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0224, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
__a :Tuple = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
) | 86 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : Dict = '''wavlm'''
def __init__( self : Tuple , UpperCamelCase__ : str=3_2 , UpperCamelCase__ : Any=7_6_8 , UpperCamelCase__ : Any=1_2 , UpperCamelCase__ : Tuple=1_2 , UpperCamelCase__ : str=3_0_7_2 , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : Optional[int]=1E-5 , UpperCamelCase__ : Any="group" , UpperCamelCase__ : List[str]="gelu" , UpperCamelCase__ : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCamelCase__ : List[str]=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase__ : Dict=(1_0, 3, 3, 3, 3, 2, 2) , UpperCamelCase__ : int=False , UpperCamelCase__ : Optional[Any]=1_2_8 , UpperCamelCase__ : Optional[int]=1_6 , UpperCamelCase__ : Optional[Any]=3_2_0 , UpperCamelCase__ : Any=8_0_0 , UpperCamelCase__ : Any=False , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Optional[Any]=0.05 , UpperCamelCase__ : Optional[Any]=1_0 , UpperCamelCase__ : Union[str, Any]=2 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Tuple=1_0 , UpperCamelCase__ : Optional[int]=3_2_0 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Tuple=1_0_0 , UpperCamelCase__ : Dict=2_5_6 , UpperCamelCase__ : Optional[int]=2_5_6 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Tuple="mean" , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Union[str, Any]=2_5_6 , UpperCamelCase__ : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCamelCase__ : Optional[Any]=(5, 3, 3, 1, 1) , UpperCamelCase__ : Any=(1, 2, 3, 1, 1) , UpperCamelCase__ : Dict=5_1_2 , UpperCamelCase__ : str=8_0 , UpperCamelCase__ : Optional[int]=0 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : str=False , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : 
Optional[Any]=2 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__)
snake_case__ = hidden_size
snake_case__ = feat_extract_norm
snake_case__ = feat_extract_activation
snake_case__ = list(UpperCamelCase__)
snake_case__ = list(UpperCamelCase__)
snake_case__ = list(UpperCamelCase__)
snake_case__ = conv_bias
snake_case__ = num_buckets
snake_case__ = max_bucket_distance
snake_case__ = num_conv_pos_embeddings
snake_case__ = num_conv_pos_embedding_groups
snake_case__ = len(self.conv_dim)
snake_case__ = num_hidden_layers
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = num_attention_heads
snake_case__ = hidden_dropout
snake_case__ = attention_dropout
snake_case__ = activation_dropout
snake_case__ = feat_proj_dropout
snake_case__ = final_dropout
snake_case__ = layerdrop
snake_case__ = layer_norm_eps
snake_case__ = initializer_range
snake_case__ = num_ctc_classes
snake_case__ = vocab_size
snake_case__ = do_stable_layer_norm
snake_case__ = use_weighted_layer_sum
snake_case__ = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case__ = apply_spec_augment
snake_case__ = mask_time_prob
snake_case__ = mask_time_length
snake_case__ = mask_time_min_masks
snake_case__ = mask_feature_prob
snake_case__ = mask_feature_length
# parameters for pretraining with codevector quantized representations
snake_case__ = num_codevectors_per_group
snake_case__ = num_codevector_groups
snake_case__ = contrastive_logits_temperature
snake_case__ = num_negatives
snake_case__ = codevector_dim
snake_case__ = proj_codevector_dim
snake_case__ = diversity_loss_weight
# ctc loss
snake_case__ = ctc_loss_reduction
snake_case__ = ctc_zero_infinity
# adapter
snake_case__ = add_adapter
snake_case__ = adapter_kernel_size
snake_case__ = adapter_stride
snake_case__ = num_adapter_layers
snake_case__ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
snake_case__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
snake_case__ = list(UpperCamelCase__)
snake_case__ = list(UpperCamelCase__)
snake_case__ = list(UpperCamelCase__)
snake_case__ = xvector_output_dim
@property
def __magic_name__ ( self : Optional[int]):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1)
| 654 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str]=13 , UpperCAmelCase__ : List[Any]=7 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : str=99 , UpperCAmelCase__ : Tuple=32 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : Union[str, Any]=37 , UpperCAmelCase__ : Any="gelu" , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : int=512 , UpperCAmelCase__ : Tuple=16 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : int=4 , UpperCAmelCase__ : int=None , ) ->Optional[int]:
'''simple docstring'''
A__ = parent
A__ = 13
A__ = 7
A__ = True
A__ = True
A__ = True
A__ = True
A__ = 99
A__ = 384
A__ = 2
A__ = 4
A__ = 37
A__ = '''gelu'''
A__ = 0.1
A__ = 0.1
A__ = 512
A__ = 16
A__ = 2
A__ = 0.02
A__ = 3
A__ = 4
A__ = 128
A__ = 2
A__ = 9
A__ = 1
A__ = None
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->int:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length])
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
A__ = ids_tensor([self.batch_size] , self.num_choices)
A__ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any]) ->List[Any]:
'''simple docstring'''
A__ = TFConvBertModel(config=UpperCAmelCase__)
A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ = [input_ids, input_mask]
A__ = model(UpperCAmelCase__)
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple) ->Tuple:
'''simple docstring'''
A__ = TFConvBertForMaskedLM(config=UpperCAmelCase__)
A__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : str) ->Tuple:
'''simple docstring'''
A__ = self.num_labels
A__ = TFConvBertForSequenceClassification(config=UpperCAmelCase__)
A__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
A__ = self.num_choices
A__ = TFConvBertForMultipleChoice(config=UpperCAmelCase__)
A__ = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.num_choices, 1))
A__ = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.num_choices, 1))
A__ = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.num_choices, 1))
A__ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str) ->Tuple:
'''simple docstring'''
A__ = self.num_labels
A__ = TFConvBertForTokenClassification(config=UpperCAmelCase__)
A__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str]) ->Any:
'''simple docstring'''
A__ = TFConvBertForQuestionAnswering(config=UpperCAmelCase__)
A__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) = config_and_inputs
A__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict:
'''simple docstring'''
A__ = TFConvBertModelTester(self)
A__ = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self : int) ->Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Dict) ->Tuple:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : int) ->int:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->Dict:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[str]) ->int:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : int) ->List[str]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
A__ = True
if hasattr(UpperCAmelCase__ , '''use_cache'''):
A__ = True
A__ = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length)
A__ = getattr(self.model_tester , '''key_length''' , UpperCAmelCase__)
for model_class in self.all_model_classes:
A__ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__)
A__ = model_class(UpperCAmelCase__)
A__ = len(model(UpperCAmelCase__))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase__ , saved_model=UpperCAmelCase__)
A__ = os.path.join(UpperCAmelCase__ , '''saved_model''' , '''1''')
A__ = tf.keras.models.load_model(UpperCAmelCase__)
A__ = model(UpperCAmelCase__)
if self.is_encoder_decoder:
A__ = outputs['''encoder_hidden_states''']
A__ = outputs['''encoder_attentions''']
else:
A__ = outputs['''hidden_states''']
A__ = outputs['''attentions''']
self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__)
A__ = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__)
self.assertListEqual(
list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def SCREAMING_SNAKE_CASE ( self : Dict) ->Tuple:
'''simple docstring'''
A__ = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''')
self.assertIsNotNone(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
A__ = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length)
A__ = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length)
A__ = getattr(self.model_tester , '''key_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''key_length''' , UpperCAmelCase__)
def check_decoder_attentions_output(UpperCAmelCase__ : List[Any]):
A__ = len(UpperCAmelCase__)
self.assertEqual(out_len % 2 , 0)
A__ = outputs.decoder_attentions
self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCAmelCase__ : str):
A__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
A__ = True
A__ = False
A__ = model_class(UpperCAmelCase__)
A__ = model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__))
A__ = len(UpperCAmelCase__)
self.assertEqual(config.output_hidden_states , UpperCAmelCase__)
check_encoder_attentions_output(UpperCAmelCase__)
if self.is_encoder_decoder:
A__ = model_class(UpperCAmelCase__)
A__ = model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__))
self.assertEqual(config.output_hidden_states , UpperCAmelCase__)
check_decoder_attentions_output(UpperCAmelCase__)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(UpperCAmelCase__)
A__ = model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__))
self.assertEqual(config.output_hidden_states , UpperCAmelCase__)
check_encoder_attentions_output(UpperCAmelCase__)
# Check attention is always last and order is fine
A__ = True
A__ = True
A__ = model_class(UpperCAmelCase__)
A__ = model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase__))
self.assertEqual(model.config.output_hidden_states , UpperCAmelCase__)
check_encoder_attentions_output(UpperCAmelCase__)
@require_tf
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Any:
'''simple docstring'''
A__ = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''')
A__ = tf.constant([[0, 1, 2, 3, 4, 5]])
A__ = model(UpperCAmelCase__)[0]
A__ = [1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase__)
A__ = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
])
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4)
| 87 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : UNetaDModel
_lowercase : ScoreSdeVeScheduler
def __init__( self : Union[str, Any] , UpperCamelCase__ : UNetaDModel , UpperCamelCase__ : ScoreSdeVeScheduler):
'''simple docstring'''
super().__init__()
self.register_modules(unet=UpperCamelCase__ , scheduler=UpperCamelCase__)
@torch.no_grad()
def __call__( self : Union[str, Any] , UpperCamelCase__ : int = 1 , UpperCamelCase__ : int = 2_0_0_0 , UpperCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase__ : Optional[str] = "pil" , UpperCamelCase__ : bool = True , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
snake_case__ = self.unet.config.sample_size
snake_case__ = (batch_size, 3, img_size, img_size)
snake_case__ = self.unet
snake_case__ = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__) * self.scheduler.init_noise_sigma
snake_case__ = sample.to(self.device)
self.scheduler.set_timesteps(UpperCamelCase__)
self.scheduler.set_sigmas(UpperCamelCase__)
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
snake_case__ = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device)
# correction step
for _ in range(self.scheduler.config.correct_steps):
snake_case__ = self.unet(UpperCamelCase__ , UpperCamelCase__).sample
snake_case__ = self.scheduler.step_correct(UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__).prev_sample
# prediction step
snake_case__ = model(UpperCamelCase__ , UpperCamelCase__).sample
snake_case__ = self.scheduler.step_pred(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__)
snake_case__ , snake_case__ = output.prev_sample, output.prev_sample_mean
snake_case__ = sample_mean.clamp(0 , 1)
snake_case__ = sample.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
snake_case__ = self.numpy_to_pil(UpperCamelCase__)
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=UpperCamelCase__)
| 654 | 0 |
"""simple docstring"""
from math import isqrt, loga
def _snake_case ( __snake_case : int ):
"""simple docstring"""
_lowerCamelCase : List[str] = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , __snake_case , __snake_case ):
_lowerCamelCase : Optional[int] = False
return [i for i in range(2 , __snake_case ) if is_prime[i]]
def _snake_case ( __snake_case : int = 800800 , __snake_case : int = 800800 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = degree * loga(__snake_case )
_lowerCamelCase : Union[str, Any] = int(__snake_case )
_lowerCamelCase : Dict = calculate_prime_numbers(__snake_case )
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : Any = 0
_lowerCamelCase : Any = len(__snake_case ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(f'''{solution() = }''')
| 88 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
_lowercase : Optional[int] = IFInpaintingSuperResolutionPipeline
_lowercase : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
_lowercase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
_lowercase : int = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __magic_name__ ( self : Union[str, Any]):
'''simple docstring'''
return self._get_superresolution_dummy_components()
def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int]=0):
'''simple docstring'''
if str(UpperCamelCase__).startswith("""mps"""):
snake_case__ = torch.manual_seed(UpperCamelCase__)
else:
snake_case__ = torch.Generator(device=UpperCamelCase__).manual_seed(UpperCamelCase__)
snake_case__ = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
snake_case__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
snake_case__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
snake_case__ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __magic_name__ ( self : Dict):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
def __magic_name__ ( self : int):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""")
def __magic_name__ ( self : Optional[Any]):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1)
def __magic_name__ ( self : List[Any]):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
    def __magic_name__ ( self : Union[str, Any]):
        '''Round-trip the whole pipeline through a local save/load cycle.'''
        self._test_save_load_local()
    def __magic_name__ ( self : str):
        '''A batch of identical single samples must match unbatched inference within 1e-2.'''
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
| 654 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class _lowerCamelCase(unittest.TestCase):
    """Round-trip and input-dispatch tests for ``CLIPSegProcessor``.

    The processor couples a CLIP tokenizer with a ViT image processor.  These
    tests build tiny on-disk fixtures, then check save/load behaviour and that
    text, image and visual-prompt inputs are routed to the right component.

    NOTE(review): the obfuscated dump named every method ``UpperCamelCase`` and
    stored fixtures in a local ``_lowercase`` instead of on ``self``; the
    original attribute and method names are restored here so the fixtures are
    actually reachable and unittest discovers every test.
    """

    def setUp(self):
        """Write a minimal tokenizer vocab/merges and image-processor config to a temp dir."""
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        """Slow CLIP tokenizer built from the fixture files."""
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Fast (Rust) CLIP tokenizer built from the fixture files."""
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        """ViT image processor built from the fixture config."""
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random 400x30 RGB PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        return [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        """processor(images=...) must match calling the image processor directly."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        """processor(text=...) must match calling the tokenizer directly."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        """Text + images produce the combined key set; empty calls raise."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_visual_prompt(self):
        """Images + visual prompt produce conditional pixel values; empty calls raise."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        """processor.batch_decode must delegate to tokenizer.batch_decode."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
| 89 |
# Digit pools used to force each column sum of n + reverse(n) to be odd
# (Project Euler 145). The dump assigned both lists to the same name `a__`,
# while the solver below reads EVEN_DIGITS / ODD_DIGITS.
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count reversible numbers with exactly ``length`` digits (Project Euler 145).

    A number n is reversible when every decimal digit of n + reverse(n) is odd.
    Digit positions are assigned middle-out; ``remainder`` carries the running
    column carry from the already-filled inner columns.

    Args:
        remaining_length: number of digit positions still unassigned.
        remainder: carry propagated from the inner (already filled) columns.
        digits: scratch list of ``length`` digit slots, mutated in place.
        length: total digit count of the candidate numbers.

    Returns:
        Count of valid assignments for the remaining positions.
    """
    if remaining_length == 0:
        # No leading zero allowed in n or in reverse(n).
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        # Check the outer half of the column sums (with carries) are all odd.
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        # Odd length: the middle digit pairs with itself, contributing 2*digit
        # (always even), so the incoming carry must already be odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result
    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        # Pick the partner digit's parity so this column sum comes out odd.
        # (Inlined pools; module-level ODD_DIGITS / EVEN_DIGITS hold the same values.)
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = (1, 3, 5, 7, 9)
        else:
            other_parity_digits = (0, 2, 4, 6, 8)
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result
def solution(max_power: int = 9) -> int:
    """Return how many reversible numbers exist below 10**max_power.

    Sums the per-length counts from :func:`reversible_numbers`
    (Project Euler 145; solution(9) == 608720).
    """
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _snake_case ( A , A , A , A , A ) -> np.array:
lowerCAmelCase__ = int(np.ceil((x_end - xa) / step_size ) )
lowerCAmelCase__ = np.zeros((n + 1,) )
lowerCAmelCase__ = ya
lowerCAmelCase__ = xa
for k in range(A ):
lowerCAmelCase__ = y[k] + step_size * ode_func(A , y[k] )
lowerCAmelCase__ = y[k] + (
(step_size / 2) * (ode_func(A , y[k] ) + ode_func(x + step_size , A ))
)
x += step_size
return y
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    # (Removed dataset-dump junk "| 90 |" that was fused onto this line.)
    import doctest

    doctest.testmod()
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
# Mapping from plain-English language name to NLLB-200 (FLORES-200) language
# code. The dump bound this dict to `a__`, but the translation tool below
# reads the module-level name LANGUAGE_CODES.
LANGUAGE_CODES = {
    "Acehnese Arabic": "ace_Arab",
    "Acehnese Latin": "ace_Latn",
    "Mesopotamian Arabic": "acm_Arab",
    "Ta'izzi-Adeni Arabic": "acq_Arab",
    "Tunisian Arabic": "aeb_Arab",
    "Afrikaans": "afr_Latn",
    "South Levantine Arabic": "ajp_Arab",
    "Akan": "aka_Latn",
    "Amharic": "amh_Ethi",
    "North Levantine Arabic": "apc_Arab",
    "Modern Standard Arabic": "arb_Arab",
    "Modern Standard Arabic Romanized": "arb_Latn",
    "Najdi Arabic": "ars_Arab",
    "Moroccan Arabic": "ary_Arab",
    "Egyptian Arabic": "arz_Arab",
    "Assamese": "asm_Beng",
    "Asturian": "ast_Latn",
    "Awadhi": "awa_Deva",
    "Central Aymara": "ayr_Latn",
    "South Azerbaijani": "azb_Arab",
    "North Azerbaijani": "azj_Latn",
    "Bashkir": "bak_Cyrl",
    "Bambara": "bam_Latn",
    "Balinese": "ban_Latn",
    "Belarusian": "bel_Cyrl",
    "Bemba": "bem_Latn",
    "Bengali": "ben_Beng",
    "Bhojpuri": "bho_Deva",
    "Banjar Arabic": "bjn_Arab",
    "Banjar Latin": "bjn_Latn",
    "Standard Tibetan": "bod_Tibt",
    "Bosnian": "bos_Latn",
    "Buginese": "bug_Latn",
    "Bulgarian": "bul_Cyrl",
    "Catalan": "cat_Latn",
    "Cebuano": "ceb_Latn",
    "Czech": "ces_Latn",
    "Chokwe": "cjk_Latn",
    "Central Kurdish": "ckb_Arab",
    "Crimean Tatar": "crh_Latn",
    "Welsh": "cym_Latn",
    "Danish": "dan_Latn",
    "German": "deu_Latn",
    "Southwestern Dinka": "dik_Latn",
    "Dyula": "dyu_Latn",
    "Dzongkha": "dzo_Tibt",
    "Greek": "ell_Grek",
    "English": "eng_Latn",
    "Esperanto": "epo_Latn",
    "Estonian": "est_Latn",
    "Basque": "eus_Latn",
    "Ewe": "ewe_Latn",
    "Faroese": "fao_Latn",
    "Fijian": "fij_Latn",
    "Finnish": "fin_Latn",
    "Fon": "fon_Latn",
    "French": "fra_Latn",
    "Friulian": "fur_Latn",
    "Nigerian Fulfulde": "fuv_Latn",
    "Scottish Gaelic": "gla_Latn",
    "Irish": "gle_Latn",
    "Galician": "glg_Latn",
    "Guarani": "grn_Latn",
    "Gujarati": "guj_Gujr",
    "Haitian Creole": "hat_Latn",
    "Hausa": "hau_Latn",
    "Hebrew": "heb_Hebr",
    "Hindi": "hin_Deva",
    "Chhattisgarhi": "hne_Deva",
    "Croatian": "hrv_Latn",
    "Hungarian": "hun_Latn",
    "Armenian": "hye_Armn",
    "Igbo": "ibo_Latn",
    "Ilocano": "ilo_Latn",
    "Indonesian": "ind_Latn",
    "Icelandic": "isl_Latn",
    "Italian": "ita_Latn",
    "Javanese": "jav_Latn",
    "Japanese": "jpn_Jpan",
    "Kabyle": "kab_Latn",
    "Jingpho": "kac_Latn",
    "Kamba": "kam_Latn",
    "Kannada": "kan_Knda",
    "Kashmiri Arabic": "kas_Arab",
    "Kashmiri Devanagari": "kas_Deva",
    "Georgian": "kat_Geor",
    "Central Kanuri Arabic": "knc_Arab",
    "Central Kanuri Latin": "knc_Latn",
    "Kazakh": "kaz_Cyrl",
    "Kabiyè": "kbp_Latn",
    "Kabuverdianu": "kea_Latn",
    "Khmer": "khm_Khmr",
    "Kikuyu": "kik_Latn",
    "Kinyarwanda": "kin_Latn",
    "Kyrgyz": "kir_Cyrl",
    "Kimbundu": "kmb_Latn",
    "Northern Kurdish": "kmr_Latn",
    "Kikongo": "kon_Latn",
    "Korean": "kor_Hang",
    "Lao": "lao_Laoo",
    "Ligurian": "lij_Latn",
    "Limburgish": "lim_Latn",
    "Lingala": "lin_Latn",
    "Lithuanian": "lit_Latn",
    "Lombard": "lmo_Latn",
    "Latgalian": "ltg_Latn",
    "Luxembourgish": "ltz_Latn",
    "Luba-Kasai": "lua_Latn",
    "Ganda": "lug_Latn",
    "Luo": "luo_Latn",
    "Mizo": "lus_Latn",
    "Standard Latvian": "lvs_Latn",
    "Magahi": "mag_Deva",
    "Maithili": "mai_Deva",
    "Malayalam": "mal_Mlym",
    "Marathi": "mar_Deva",
    "Minangkabau Arabic ": "min_Arab",
    "Minangkabau Latin": "min_Latn",
    "Macedonian": "mkd_Cyrl",
    "Plateau Malagasy": "plt_Latn",
    "Maltese": "mlt_Latn",
    "Meitei Bengali": "mni_Beng",
    "Halh Mongolian": "khk_Cyrl",
    "Mossi": "mos_Latn",
    "Maori": "mri_Latn",
    "Burmese": "mya_Mymr",
    "Dutch": "nld_Latn",
    "Norwegian Nynorsk": "nno_Latn",
    "Norwegian Bokmål": "nob_Latn",
    "Nepali": "npi_Deva",
    "Northern Sotho": "nso_Latn",
    "Nuer": "nus_Latn",
    "Nyanja": "nya_Latn",
    "Occitan": "oci_Latn",
    "West Central Oromo": "gaz_Latn",
    "Odia": "ory_Orya",
    "Pangasinan": "pag_Latn",
    "Eastern Panjabi": "pan_Guru",
    "Papiamento": "pap_Latn",
    "Western Persian": "pes_Arab",
    "Polish": "pol_Latn",
    "Portuguese": "por_Latn",
    "Dari": "prs_Arab",
    "Southern Pashto": "pbt_Arab",
    "Ayacucho Quechua": "quy_Latn",
    "Romanian": "ron_Latn",
    "Rundi": "run_Latn",
    "Russian": "rus_Cyrl",
    "Sango": "sag_Latn",
    "Sanskrit": "san_Deva",
    "Santali": "sat_Olck",
    "Sicilian": "scn_Latn",
    "Shan": "shn_Mymr",
    "Sinhala": "sin_Sinh",
    "Slovak": "slk_Latn",
    "Slovenian": "slv_Latn",
    "Samoan": "smo_Latn",
    "Shona": "sna_Latn",
    "Sindhi": "snd_Arab",
    "Somali": "som_Latn",
    "Southern Sotho": "sot_Latn",
    "Spanish": "spa_Latn",
    "Tosk Albanian": "als_Latn",
    "Sardinian": "srd_Latn",
    "Serbian": "srp_Cyrl",
    "Swati": "ssw_Latn",
    "Sundanese": "sun_Latn",
    "Swedish": "swe_Latn",
    "Swahili": "swh_Latn",
    "Silesian": "szl_Latn",
    "Tamil": "tam_Taml",
    "Tatar": "tat_Cyrl",
    "Telugu": "tel_Telu",
    "Tajik": "tgk_Cyrl",
    "Tagalog": "tgl_Latn",
    "Thai": "tha_Thai",
    "Tigrinya": "tir_Ethi",
    "Tamasheq Latin": "taq_Latn",
    "Tamasheq Tifinagh": "taq_Tfng",
    "Tok Pisin": "tpi_Latn",
    "Tswana": "tsn_Latn",
    "Tsonga": "tso_Latn",
    "Turkmen": "tuk_Latn",
    "Tumbuka": "tum_Latn",
    "Turkish": "tur_Latn",
    "Twi": "twi_Latn",
    "Central Atlas Tamazight": "tzm_Tfng",
    "Uyghur": "uig_Arab",
    "Ukrainian": "ukr_Cyrl",
    "Umbundu": "umb_Latn",
    "Urdu": "urd_Arab",
    "Northern Uzbek": "uzn_Latn",
    "Venetian": "vec_Latn",
    "Vietnamese": "vie_Latn",
    "Waray": "war_Latn",
    "Wolof": "wol_Latn",
    "Xhosa": "xho_Latn",
    "Eastern Yiddish": "ydd_Hebr",
    "Yoruba": "yor_Latn",
    "Yue Chinese": "yue_Hant",
    "Chinese Simplified": "zho_Hans",
    "Chinese Traditional": "zho_Hant",
    "Standard Malay": "zsm_Latn",
    "Zulu": "zul_Latn",
}
class _lowerCAmelCase(PipelineTool):
    """Agent tool that translates text between languages via NLLB-200.

    Attribute names restored from the upstream TranslationTool: the dump bound
    every class attribute to `_lowercase` and every method to `__magic_name__`
    (with duplicate parameter names), and inherited from an undefined
    `lowercase_` instead of the imported `PipelineTool`.
    """

    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM

    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        """Map plain-English language names to NLLB codes and tokenize.

        Raises:
            ValueError: if either language name is not in ``lang_to_code``.
        """
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        """Run generation on the tokenized inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode the first generated sequence back into text."""
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 654 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for BioGptTokenizer (BPE vocab/merges fixtures).

    Restored from the upstream test: the dump inherited from an undefined
    `_lowercase` instead of `TokenizerTesterMixin`, collapsed the mixin
    configuration attributes to `_lowerCamelCase`, never stored the fixture
    paths on `self`, and fused dataset junk onto the last line.
    """

    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """Write a tiny BPE vocab and merges file into the mixin's temp dir."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        """Fixture pair used by the common tokenizer tests."""
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # BioGPT prepends the </s> token (id 2) to every sequence.
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    """Return a representative linear layer of *model* for quantization checks.

    GPT-2 uses Conv1D-style `c_fc`; Bloom-style models expose `dense_4h_to_h`.
    (Renamed from the collapsed `_UpperCAmelCase`: the tests below call
    `get_some_linear_layer`, the parameter was mangled to `a` while the body
    read `model`, and `dense_ah_to_h` was a digit-mangled `dense_4h_to_h`.)
    """
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : int , UpperCamelCase__ : nn.Module , UpperCamelCase__ : int):
'''simple docstring'''
super().__init__()
snake_case__ = module
snake_case__ = nn.Sequential(
nn.Linear(module.in_features , UpperCamelCase__ , bias=UpperCamelCase__) , nn.Linear(UpperCamelCase__ , module.out_features , bias=UpperCamelCase__) , )
snake_case__ = (2.0 / (5 * min(module.in_features , module.out_features))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=UpperCamelCase__)
nn.init.zeros_(self.adapter[1].weight)
self.adapter.to(module.weight.device)
def __magic_name__ ( self : Tuple , UpperCamelCase__ : int , *UpperCamelCase__ : Dict , **UpperCamelCase__ : str):
'''simple docstring'''
return self.module(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__) + self.adapter(UpperCamelCase__)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCAmelCase(unittest.TestCase):
    """Shared fixtures for the 4-bit bitsandbytes tests.

    Restored attribute names: the dump assigned every constant to `_lowercase`
    (so only the last survived) and bound the tokenizer to a local instead of
    `self.tokenizer`, which the subclasses read.
    """

    model_name = "bigscience/bloom-1b7"

    # Constant values
    # Expected fp16 / 4-bit memory-footprint ratio.
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Subclasses rely on this tokenizer.
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class _lowerCAmelCase(lowercase_):
    """End-to-end 4-bit loading tests for a causal LM.

    Method names restored from the upstream test file: the dump named every
    method `__magic_name__`, so all but the last were shadowed, and collapsed
    locals/attributes to `snake_case__`. Digit-mangled runtime names
    (`load_in_abit`, `torch.floataa`, `Paramsabit`, `uinta`) are restored to
    their real counterparts.
    """

    def setUp(self):
        super().setUp()
        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit
        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        """The quantized config must serialize without errors."""
        config = self.model_4bit.config
        self.assertTrue(hasattr(config, "quantization_config"))
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        """4-bit model must shrink memory by the expected ratio and use 4-bit params."""
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()
        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        """All quantizable linears must hold packed uint8 storage."""
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        """Loading through an explicit BitsAndBytesConfig must behave the same."""
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        # NOTE(review): the raised type was mangled away; upstream expects
        # NotImplementedError for saving 4-bit models — confirm against the
        # installed transformers version.
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        bnb_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=bnb_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        """Casting/moving a 4-bit model must raise; the fp16 model stays flexible."""
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")
        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)
        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))
        with self.assertRaises(ValueError):
            self.model_4bit.float()
        with self.assertRaises(ValueError):
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")
        _ = self.model_fp16.half()
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        """`keep_in_fp32` modules must stay fp32 after 4-bit loading."""
        model = AutoModelForSeqaSeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCAmelCase(unittest.TestCase):
    """4-bit loading tests for T5, with and without `_keep_in_fp32_modules`."""

    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        """Generation must still work with the fp32 allow-list disabled."""
        from transformers import TaForConditionalGeneration

        modules = TaForConditionalGeneration._keep_in_fp32_modules
        TaForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # Restore the class-level allow-list for other tests.
        TaForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb
        from transformers import TaForConditionalGeneration

        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class _lowerCAmelCase(lowercase_):
    """Checks 4-bit quantization across model classes; heads must stay unquantized."""

    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        # The transformer body is quantized...
        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)
        # ...but heads should remain plain nn.Parameter.
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class _lowerCAmelCase(lowercase_):
    """4-bit loading through the `pipeline` convenience API."""

    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )
        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class _lowerCAmelCase ( lowercase_ ):
    """Multi-GPU test: shard a quantized model with device_map='balanced' and generate."""

    def __magic_name__( self : Union[str, Any]):
        '''simple docstring'''
        super().setUp()

    def __magic_name__( self : int):
        """Load balanced across 2 GPUs, verify the device map, then run generate."""
        # NOTE(review): `UpperCamelCase__`, `model_parallel`, `encoded_input` and
        # `output_parallel` are mangled/undefined names — the from_pretrained
        # result below is presumably `model_parallel`; confirm upstream.
        snake_case__ = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=UpperCamelCase__ , device_map="""balanced""")
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()) , {0, 1})
        # Check that inference pass works on the model
        snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
        # Second real batch
        snake_case__ = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS)
class _lowerCAmelCase ( lowercase_ ):
    """Smoke test for LoRA-style fine-tuning on top of a quantized OPT model:
    freeze the base, attach adapters to attention projections, check grads flow."""

    def __magic_name__( self : Any):
        '''simple docstring'''
        # NOTE(review): bound to a throwaway local instead of `self.model_name`
        # — confirm against the unmangled original.
        snake_case__ = """facebook/opt-350m"""
        super().setUp()

    def __magic_name__( self : Any):
        """One forward/backward pass; adapter weights must receive gradients,
        embeddings must not."""
        if version.parse(importlib.metadata.version("""bitsandbytes""")) < version.parse("""0.37.0"""):
            return
        # Step 1: freeze all parameters
        # NOTE(review): many statements below assign to a mangled throwaway local
        # where the original wrote `param.requires_grad = ...`, `param.data = ...`
        # and `module.q_proj = LoRALayer(...)`; `UpperCamelCase__` is undefined.
        snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__)
        self.assertEqual(set(model.hf_device_map.values()) , {torch.cuda.current_device()})
        for param in model.parameters():
            snake_case__ = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                snake_case__ = param.data.to(torch.floataa)
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(UpperCamelCase__)):
                snake_case__ = LoRALayer(module.q_proj , rank=1_6)
                snake_case__ = LoRALayer(module.k_proj , rank=1_6)
                snake_case__ = LoRALayer(module.v_proj , rank=1_6)
        # Step 3: dummy batch
        snake_case__ = self.tokenizer("""Test batch """ , return_tensors="""pt""").to(0)
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            snake_case__ = model.forward(**UpperCamelCase__)
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(UpperCamelCase__ , UpperCamelCase__):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(UpperCamelCase__ , nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : List[Any] = '''gpt2-xl'''
_lowercase : Any = 3.3191_8548_5415_2187
| 654 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
    """Configuration of the BridgeTower vision (ViT-style) tower.

    FIX (two defects in the mangled original):
      * every `__init__` parameter shared one duplicate name (a SyntaxError) and
        the values were discarded into a throwaway local — parameter names are
        restored from the right-hand sides and stored on the instance;
      * `from_pretrained` on a full "bridgetower" checkpoint extracted the
        *text* sub-config; a vision config must extract ``vision_config``
        (compare the sibling text class, which correctly takes ``text_config``).
    """

    lowerCamelCase_ = 'bridgetower_vision_model'  # mangled `model_type` attribute

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def lowerCamelCase_(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        """Load the vision sub-config, unwrapping a full bridgetower config if given."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get('''model_type''') == "bridgetower":
            config_dict = config_dict['''vision_config''']  # FIX: was '''text_config'''
        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
        return cls.from_dict(config_dict, **kwargs)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
    """Configuration of the BridgeTower text (RoBERTa-style) tower.

    FIX (mangled original): every `__init__` parameter shared one duplicate name
    (a SyntaxError) and the values were discarded into a throwaway local —
    parameter names are restored from the right-hand sides of the original
    assignments and stored on the instance.
    """

    lowerCamelCase_ = 'bridgetower_text_model'  # mangled `model_type` attribute

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def lowerCamelCase_(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        """Load the text sub-config, unwrapping a full bridgetower config if given."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get('''model_type''') == "bridgetower":
            config_dict = config_dict['''text_config''']
        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
        return cls.from_dict(config_dict, **kwargs)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
    """Top-level BridgeTower configuration combining text and vision sub-configs.

    FIX (mangled original): `__init__` arguments were discarded into a throwaway
    local instead of stored on self, `to_dict` returned an undefined name
    `output`, and the classmethod's parameters were unnamed duplicates.
    NOTE(review): the two methods below were both mangled to `lowerCamelCase_`
    (the later def shadows the earlier one) — restore the distinct names
    `from_text_vision_configs` / `to_dict` upstream.
    """

    lowerCamelCase_ = 'bridgetower'  # mangled `model_type` attribute

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # TODO: remove this once the Hub files are updated.
        kwargs.pop('''text_config_dict''', None)
        kwargs.pop('''vision_config_dict''', None)
        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info('''`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.''')
        if vision_config is None:
            vision_config = {}
            logger.info('''`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.''')
        # NOTE(review): these class names were mangled away elsewhere in this
        # module — they match the original (unmangled) sibling config classes.
        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def lowerCamelCase_(cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs):
        """Build a full config from two instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def lowerCamelCase_(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)  # FIX: `output` was never bound
        output['''text_config'''] = self.text_config.to_dict()
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
| 92 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# NOTE(review): obfuscation collapsed four distinct constants (presumably the
# label dir, image dir, output dir, and flip direction) onto the single name
# `a__`, so only the final assignment survives — restore distinct names upstream.
a__ = """"""
a__ = """"""
a__ = """"""
a__ = 1  # (0 is vertical, 1 is horizontal)
def _UpperCAmelCase( ):
    """Entry point: flip every dataset image and write the new images + labels.

    NOTE(review): heavily mangled — `a`, `paths`, `new_annos`, `letter_code`,
    `file_name`, `file_root` and `annos_list` were unpack/assignment targets
    collapsed onto throwaway names, so most references below are undefined as
    written; restore from the unmangled augmentation script.
    """
    snake_case__ , snake_case__ = get_dataset(a , a )
    print("""Processing...""" )
    snake_case__ , snake_case__ , snake_case__ = update_image_and_anno(a , a , a )
    for index, image in enumerate(a ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        snake_case__ = random_chars(32 )
        snake_case__ = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0]
        snake_case__ = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
        cva.imwrite(F'''/{file_root}.jpg''' , a , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(F'''Success {index+1}/{len(a )} with {file_name}''' )
        snake_case__ = []
        for anno in new_annos[index]:
            snake_case__ = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
            annos_list.append(a )
        with open(F'''/{file_root}.txt''' , """w""" ) as outfile:
            outfile.write("""\n""".join(line for line in annos_list ) )
def _UpperCAmelCase(label_dir: str, img_dir: str):
    """Collect YOLO-style label files and their matching image paths.

    FIX: the mangled signature named both parameters `a` (a duplicate-argument
    SyntaxError) and bound the result lists to one throwaway local; names are
    restored from the body's usage (labels are globbed from the first dir,
    image paths are joined under the second).

    Args:
        label_dir: directory containing `*.txt` label files.
        img_dir: directory containing the `*.jpg` images.
    Returns:
        (img_paths, labels) where labels[i] is a list of
        [class_id, x_center, y_center, width, height] boxes for img_paths[i].
        Label files with no boxes are skipped.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, """*.txt""")):
        label_name = label_file.split(os.sep)[-1].rsplit(""".""", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, F'''{label_name}.jpg''')
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("""\n""").split(""" """)
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def _UpperCAmelCase(img_list: list, anno_list: list, flip_type: int = 1):
    """Flip every image and adjust its bounding boxes accordingly.

    FIX: the mangled signature declared three parameters all named `a` (a
    SyntaxError) while the body reads `img_list`, `anno_list` and `flip_type`,
    and the per-iteration results were bound to one throwaway local; the
    referenced names are restored.

    Args:
        img_list: image file paths.
        anno_list: per-image box lists [class_id, x_center, y_center, w, h].
        flip_type: 1 = horizontal flip (mirror x_center), 0 = vertical flip
            (mirror y_center).
    Returns:
        (new_imgs_list, new_annos_lists, path_list).
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def _UpperCAmelCase(number_char: int = 32) -> str:
    """Return a random string of lowercase letters and digits.

    FIX: the parameter was mangled to `a` while the assert reads `number_char`,
    and the character pool was bound to a throwaway local; the referenced names
    are restored.
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
    # NOTE(review): `main` is undefined in this module — every function above
    # was mangled to `_UpperCAmelCase`; restore the entry-point name upstream.
    main()
    print("""DONE ✅""")
| 654 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __A () ->int:
    """Entry point for the `accelerate` command line tool: build the argument
    parser, register each sub-command, dispatch to the selected handler.

    FIX: the mangled original bound `parser` to a throwaway local (so every
    later use of `parser`/`args` was undefined) and passed the undefined name
    `_SCREAMING_SNAKE_CASE` everywhere; `allow_abbrev=False` matches upstream
    accelerate (prefix abbreviation would clash with sub-command flags).
    """
    parser = ArgumentParser('Accelerate CLI tool', usage='accelerate <command> [<args>]', allow_abbrev=False)
    subparsers = parser.add_subparsers(help='accelerate command helpers')

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
    # FIX: `main` does not exist in this module — the CLI entry point above was
    # mangled to `__A`.
    __A()
| 93 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
# FIX: these three constants were collapsed onto the single mangled name `a__`,
# yet the code reads them as SPEED_TEST_N_EXAMPLES (in the benchmark body) and
# RESULTS_BASEPATH / RESULTS_FILENAME (in the path join below) — restore the
# referenced names.
SPEED_TEST_N_EXAMPLES = 5_0_0_0_0_0
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def _UpperCAmelCase(dataset: datasets.Dataset, **kwargs):
    """Timed benchmark helper: run `Dataset.map` with the given kwargs.

    FIX: the mangled signature named both the positional and the **kwargs
    parameter `a` (a SyntaxError); the body reads `dataset`, so distinct
    names are restored. The mapped result is intentionally discarded — only
    the duration (measured by @get_duration) matters.
    """
    _ = dataset.map(**kwargs)
@get_duration
def _UpperCAmelCase(dataset: datasets.Dataset, **kwargs):
    """Timed benchmark helper: run `Dataset.filter` with the given kwargs.

    FIX: the mangled signature named both parameters `a` (a SyntaxError); the
    body reads `dataset`, so distinct names are restored. The result is
    discarded — only the duration (measured by @get_duration) matters.
    """
    _ = dataset.filter(**kwargs)
def _UpperCAmelCase( ):
    """Benchmark `Dataset.map`/`Dataset.filter` across output formats and dump
    the timings as JSON.

    NOTE(review): heavily mangled — every timing result is bound to a throwaway
    local, the *builtin* `map`/`filter` are called instead of the timed helpers
    above (all defs share the name `_UpperCAmelCase`), `examples`/`dataset`/`a`
    are undefined as written, and the final open() path was lost; restore from
    the unmangled benchmark script.
    """
    snake_case__ = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        snake_case__ = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
        snake_case__ = generate_example_dataset(
            os.path.join(a , """dataset.arrow""" ) , a , num_examples=a )
        snake_case__ = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=a )

        def tokenize(a : Union[str, Any] ):
            return tokenizer(examples["""text"""] )

        snake_case__ = map(a )
        snake_case__ = map(a , batched=a )
        snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""numpy""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""pandas""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        snake_case__ = map(a , function=a , batched=a )
        snake_case__ = filter(a )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(a , """wb""" ) as f:
        f.write(json.dumps(a ).encode("""utf-8""" ) )
if __name__ == "__main__":  # useful to run the profiler
    # NOTE(review): `benchmark_map_filter` is the pre-mangling name of the
    # benchmark entry point above (all defs were renamed `_UpperCAmelCase`),
    # so this call raises NameError as written.
    benchmark_map_filter()
| 654 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def lowercase_(unknown_args: list) -> dict:
    """Parse trailing `--key value` pairs into a dict (keys stripped of dashes).

    FIX: the parameter was mangled to `__A` while the body reads
    `unknown_args`; the wrong annotations (`Dict` in, `Optional[int]` out)
    are corrected to match the actual contract.
    """
    return {key.lstrip('''-'''): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def lowercase_() -> None:
    """Entry point for the `datasets-cli` tool: build the parser, register the
    sub-commands, parse known + unknown args, run the selected service.

    FIX: the mangled original bound `parser`/`args`/`kwargs`/`service` to a
    throwaway local (leaving later uses undefined) and passed the undefined
    name `__A` to `allow_abbrev` and `parse_unknown_args`.
    NOTE(review): this def shadows the helper above (both were mangled to
    `lowercase_`), and `parse_unknown_args` is that helper's pre-mangling
    name — restore distinct names upstream.
    """
    parser = ArgumentParser(
        '''HuggingFace Datasets CLI tool''', usage='''datasets-cli <command> [<args>]''', allow_abbrev=False)
    subparsers = parser.add_subparsers(help='''datasets-cli command helpers''')
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(subparsers)
    EnvironmentCommand.register_subcommand(subparsers)
    TestCommand.register_subcommand(subparsers)
    RunBeamCommand.register_subcommand(subparsers)
    DummyDataCommand.register_subcommand(subparsers)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, '''func'''):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
    # NOTE(review): `main` is undefined — both CLI functions above were mangled
    # to `lowercase_`; the module-level binding that survives is the entry
    # point, but restore the original name upstream.
    main()
| 94 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def _UpperCAmelCase(config, base_model=False):
    """Build (timm_key, hf_key) pairs mapping DeiT timm weights to HF names.

    FIX: the mangled signature named both parameters `a` (a duplicate-argument
    SyntaxError) while the body reads `config` and `base_model`, and the
    result list was bound to a throwaway local; the referenced names are
    restored.

    Args:
        config: DeiT config providing `num_hidden_layers`.
        base_model: if True, target the bare encoder (no "deit." prefix,
            layernorm + pooler instead of the classification heads).
    Returns:
        list of (old_name, new_name) tuples.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight'''))
        rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias'''))
        rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight'''))
        rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias'''))
        rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight'''))
        rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias'''))
        rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight'''))
        rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias'''))
        rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight'''))
        rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias'''))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("""cls_token""", """deit.embeddings.cls_token"""),
            ("""dist_token""", """deit.embeddings.distillation_token"""),
            ("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
            ("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
            ("""pos_embed""", """deit.embeddings.position_embeddings"""),
        ])
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("""norm.weight""", """layernorm.weight"""),
                ("""norm.bias""", """layernorm.bias"""),
                ("""pre_logits.fc.weight""", """pooler.dense.weight"""),
                ("""pre_logits.fc.bias""", """pooler.dense.bias"""),
            ])
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("""norm.weight""", """deit.layernorm.weight"""),
                ("""norm.bias""", """deit.layernorm.bias"""),
                ("""head.weight""", """cls_classifier.weight"""),
                ("""head.bias""", """cls_classifier.bias"""),
                ("""head_dist.weight""", """distillation_classifier.weight"""),
                ("""head_dist.bias""", """distillation_classifier.bias"""),
            ])
    return rename_keys
def _UpperCAmelCase(state_dict, config, base_model=False):
    """Split each timm fused qkv projection into separate HF q/k/v entries.

    FIX: the mangled signature declared three parameters all named `a` (a
    SyntaxError) while the body reads `state_dict`, `config` and `base_model`,
    and the sliced q/k/v tensors were discarded into a throwaway local instead
    of being written back under the HF key names — the function was a no-op
    apart from popping the fused weights. The standard HF key layout
    (`{prefix}encoder.layer.{i}.attention.attention.{query,key,value}`) is
    restored; confirm the exact prefix against the unmangled converter.

    Mutates `state_dict` in place.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = """"""
        else:
            prefix = """deit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def _UpperCAmelCase(dct, old, new):
    """Rename `dct[old]` to `dct[new]` in place.

    FIX: the mangled version named all three parameters `a` (a SyntaxError)
    and popped the value into a throwaway local without re-inserting it —
    the key was deleted instead of renamed.
    """
    val = dct.pop(old)
    dct[new] = val
def _UpperCAmelCase():
    """Download the standard COCO cats image used as a conversion sanity check.

    FIX: the mangled original bound the URL and the decoded image to a
    throwaway local and called `requests.get` with the undefined name `a`;
    restore `url`, `stream=True` and the `im` binding that `return im` reads.
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def _UpperCAmelCase(deit_name, pytorch_dump_folder_path):
    """Convert a timm DeiT checkpoint into a HF DeiTForImageClassificationWithTeacher,
    verify its logits against the timm model, and save model + image processor.

    FIX: the mangled signature named both parameters `a` (a SyntaxError) while
    the body reads `deit_name` / `pytorch_dump_folder_path`, and every local
    (`config`, `idalabel`, `timm_model`, `state_dict`, `model`, ...) was bound
    to one throwaway name, leaving later references undefined. Locals are
    restored from the names the body reads; confirm against the unmangled
    converter.
    """
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("""tiny"""):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("""small"""):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("""base"""):
        pass
    elif deit_name[4:].startswith("""large"""):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    # NOTE(review): `create_rename_keys`, `rename_key`, `read_in_q_k_v` and
    # `prepare_img` are the pre-mangling names of the helpers above (all were
    # renamed `_UpperCAmelCase`) — restore distinct names upstream.
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size)  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="""pt""")
    pixel_values = encoding["""pixel_values"""]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # FIX: `parser` and `args` were mangled to the module-wide throwaway name
    # `a__`, leaving the `add_argument` calls and attribute reads undefined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--deit_name""",
        default="""vit_deit_base_distilled_patch16_224""",
        type=str,
        help="""Name of the DeiT timm model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    args = parser.parse_args()
    # NOTE(review): `convert_deit_checkpoint` is the pre-mangling name of the
    # conversion function above — restore it upstream.
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 654 | 0 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def snake_case(tmpdir):
    """A held lock must make a second acquire() time out after ~`timeout` seconds.

    FIX: the mangled original named the parameter `A__` while the body reads
    `tmpdir`, bound both locks and the timeout to throwaway locals (so
    `locka`/`_start`/`timeout` were undefined or aliased), and passed the
    fixture itself to `pytest.raises` instead of the `Timeout` exception.
    """
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout
def snake_case(tmpdir):
    """Over-long lock filenames must be truncated to a usable (<=255 char) name
    that still ends in ".lock", and the truncated lock must still exclude.

    FIX: the mangled original named the parameter `A__` while the body reads
    `tmpdir`, bound `filename` and both locks to throwaway locals, and asserted
    `endswith(A__)` instead of `endswith(filename)`.
    """
    filename = "a" * 10_00 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 2_55
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
| 95 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( lowercase_ ):
    """Output container for the prior transformer.

    The single field holds the predicted embedding tensor.
    NOTE(review): the field name is mangled — upstream this is presumably
    `predicted_image_embedding`; restore it so callers can access it by name.
    """

    _lowercase : torch.FloatTensor
class _lowerCAmelCase ( lowercase_ , lowercase_ ):
"""simple docstring"""
@register_to_config
# NOTE(review): the obfuscation renamed every parameter to `UpperCamelCase__`
# (a duplicate-argument SyntaxError) and turned the `self.*` attribute stores
# into rebindings of one throwaway local `snake_case__`; the original names
# are recoverable from each right-hand side (num_attention_heads, inner_dim,
# time_proj, transformer_blocks, ...). Restore them upstream.
def __init__( self : Tuple , UpperCamelCase__ : int = 3_2 , UpperCamelCase__ : int = 6_4 , UpperCamelCase__ : int = 2_0 , UpperCamelCase__ : int = 7_6_8 , UpperCamelCase__ : Optional[Any]=7_7 , UpperCamelCase__ : str=4 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : str = "silu" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = "linear" , UpperCamelCase__ : Optional[str] = "prd" , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , ):
    """Build the prior transformer: timestep embedding, input/output projections,
    a stack of BasicTransformerBlocks, and a causal attention mask buffer."""
    super().__init__()
    snake_case__ = num_attention_heads
    snake_case__ = attention_head_dim
    snake_case__ = num_attention_heads * attention_head_dim
    snake_case__ = additional_embeddings
    snake_case__ = time_embed_dim or inner_dim
    snake_case__ = embedding_proj_dim or embedding_dim
    snake_case__ = clip_embed_dim or embedding_dim
    # sinusoidal timestep projection followed by an MLP embedding
    snake_case__ = Timesteps(UpperCamelCase__ , UpperCamelCase__ , 0)
    snake_case__ = TimestepEmbedding(UpperCamelCase__ , UpperCamelCase__ , out_dim=UpperCamelCase__ , act_fn=UpperCamelCase__)
    snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
    if embedding_proj_norm_type is None:
        snake_case__ = None
    elif embedding_proj_norm_type == "layer":
        snake_case__ = nn.LayerNorm(UpperCamelCase__)
    else:
        raise ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''')
    snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
    if encoder_hid_proj_type is None:
        snake_case__ = None
    elif encoder_hid_proj_type == "linear":
        snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
    else:
        raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''')
    # learned positional embeddings over (num_embeddings + additional) tokens
    snake_case__ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase__))
    if added_emb_type == "prd":
        snake_case__ = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase__))
    elif added_emb_type is None:
        snake_case__ = None
    else:
        raise ValueError(
            F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''')
    snake_case__ = nn.ModuleList(
        [
            BasicTransformerBlock(
                UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn="""gelu""" , attention_bias=UpperCamelCase__ , )
            for d in range(UpperCamelCase__)
        ])
    if norm_in_type == "layer":
        snake_case__ = nn.LayerNorm(UpperCamelCase__)
    elif norm_in_type is None:
        snake_case__ = None
    else:
        raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''')
    snake_case__ = nn.LayerNorm(UpperCamelCase__)
    snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
    # strictly-upper-triangular -10000 mask: each token may only attend to
    # itself and earlier positions (causal attention)
    snake_case__ = torch.full(
        [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0)
    causal_attention_mask.triu_(1)
    snake_case__ = causal_attention_mask[None, ...]
    self.register_buffer("""causal_attention_mask""" , UpperCamelCase__ , persistent=UpperCamelCase__)
    snake_case__ = nn.Parameter(torch.zeros(1 , UpperCamelCase__))
    snake_case__ = nn.Parameter(torch.zeros(1 , UpperCamelCase__))
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __magic_name__( self : Optional[int]):
    """Return all attention processors in this model as a dict keyed by their
    dotted module path, collected by recursing through every submodule."""
    snake_case__ = {}

    def fn_recursive_add_processors(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Dict[str, AttentionProcessor]):
        # NOTE(review): the parameters are mangled duplicates of one name; the
        # body reads `module` and `processors` — restore upstream names.
        if hasattr(UpperCamelCase__ , """set_processor"""):
            snake_case__ = module.processor
        for sub_name, child in module.named_children():
            fn_recursive_add_processors(F'''{name}.{sub_name}''' , UpperCamelCase__ , UpperCamelCase__)
        return processors

    for name, module in self.named_children():
        fn_recursive_add_processors(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
    return processors
def __magic_name__( self : Union[str, Any] , UpperCamelCase__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
    """Install attention processor(s): either a single processor applied to all
    attention layers, or a dict mapping dotted module paths to processors.

    NOTE(review): inner-function parameters are mangled duplicates; the bodies
    read `module`, `processor` and `count` — restore upstream names.
    """
    snake_case__ = len(self.attn_processors.keys())
    # a dict of processors must cover every attention layer exactly
    if isinstance(UpperCamelCase__ , UpperCamelCase__) and len(UpperCamelCase__) != count:
        raise ValueError(
            F'''A dict of processors was passed, but the number of processors {len(UpperCamelCase__)} does not match the'''
            F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''')

    def fn_recursive_attn_processor(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Optional[int]):
        if hasattr(UpperCamelCase__ , """set_processor"""):
            if not isinstance(UpperCamelCase__ , UpperCamelCase__):
                module.set_processor(UpperCamelCase__)
            else:
                # dict case: pop this module's entry by its dotted path
                module.set_processor(processor.pop(F'''{name}.processor'''))
        for sub_name, child in module.named_children():
            fn_recursive_attn_processor(F'''{name}.{sub_name}''' , UpperCamelCase__ , UpperCamelCase__)

    for name, module in self.named_children():
        fn_recursive_attn_processor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
def __magic_name__( self : Dict):
    """Reset every attention layer back to the default AttnProcessor."""
    self.set_attn_processor(AttnProcessor())
def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[torch.Tensor, float, int] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.BoolTensor] = None , UpperCamelCase__ : bool = True , ):
'''simple docstring'''
snake_case__ = hidden_states.shape[0]
snake_case__ = timestep
if not torch.is_tensor(UpperCamelCase__):
snake_case__ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device)
elif torch.is_tensor(UpperCamelCase__) and len(timesteps.shape) == 0:
snake_case__ = timesteps[None].to(hidden_states.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
snake_case__ = timesteps * torch.ones(UpperCamelCase__ , dtype=timesteps.dtype , device=timesteps.device)
snake_case__ = self.time_proj(UpperCamelCase__)
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
snake_case__ = timesteps_projected.to(dtype=self.dtype)
snake_case__ = self.time_embedding(UpperCamelCase__)
if self.embedding_proj_norm is not None:
snake_case__ = self.embedding_proj_norm(UpperCamelCase__)
snake_case__ = self.embedding_proj(UpperCamelCase__)
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
snake_case__ = self.encoder_hidden_states_proj(UpperCamelCase__)
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""")
snake_case__ = self.proj_in(UpperCamelCase__)
snake_case__ = self.positional_embedding.to(hidden_states.dtype)
snake_case__ = []
snake_case__ = 0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCamelCase__)
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape) == 2:
snake_case__ = proj_embeddings[:, None, :]
if len(hidden_states.shape) == 2:
snake_case__ = hidden_states[:, None, :]
snake_case__ = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
snake_case__ = self.prd_embedding.to(hidden_states.dtype).expand(UpperCamelCase__ , -1 , -1)
additional_embeds.append(UpperCamelCase__)
snake_case__ = torch.cat(
UpperCamelCase__ , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
snake_case__ = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
snake_case__ = F.pad(
UpperCamelCase__ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
snake_case__ = hidden_states + positional_embeddings
if attention_mask is not None:
snake_case__ = (1 - attention_mask.to(hidden_states.dtype)) * -1_00_00.0
snake_case__ = F.pad(UpperCamelCase__ , (0, self.additional_embeddings) , value=0.0)
snake_case__ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
snake_case__ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0)
if self.norm_in is not None:
snake_case__ = self.norm_in(UpperCamelCase__)
for block in self.transformer_blocks:
snake_case__ = block(UpperCamelCase__ , attention_mask=UpperCamelCase__)
snake_case__ = self.norm_out(UpperCamelCase__)
if self.prd_embedding is not None:
snake_case__ = hidden_states[:, -1]
else:
snake_case__ = hidden_states[:, additional_embeddings_len:]
snake_case__ = self.proj_to_clip_embeddings(UpperCamelCase__)
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase__)
def __magic_name__ ( self : Any , UpperCamelCase__ : Any):
'''simple docstring'''
snake_case__ = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 654 | 0 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
# NOTE(review): both sentinels were scrambled to the same name `__lowerCamelCase`, so the
# second assignment shadows the first. Upstream these are distinct objects (`_unmatched`,
# the "no partition rule matched" marker, and `empty_dict`) — confirm before relying on them.
__lowerCamelCase = object()
# For specifying empty leaf dict `{}`
__lowerCamelCase = object()
def a ( qs , ks ) -> bool:
    """Return True if the regex sequence ``qs`` matches some contiguous window of ``ks``.

    Each regex in ``qs`` is anchored at the end (``$`` appended) and matched with
    ``re.match``, i.e. it must match a full element of ``ks``.

    Fix vs. scrambled original: both parameters shared one name (SyntaxError) and the
    body referenced the intended names ``qs``/``ks`` which were never bound.
    """
    qts = tuple(re.compile(x + """$""") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def a ( rules ):
    """Build a ``replace(key, val)`` function that returns the replacement of the first
    rule in ``rules`` whose pattern matches ``key`` (via ``_match``), else ``val``.

    Fix vs. scrambled original: the inner ``replace`` repeated one parameter name
    (SyntaxError) and the closure referenced ``rules`` which was never bound.

    NOTE(review): ``_match`` is referenced here but the matcher in this file was
    scrambled to the name ``a`` — pre-existing breakage, confirm the intended name.
    """
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def a ( ) -> list:
    """Return the (pattern, PartitionSpec) rules used to shard a GPT-style flax model
    across a ``"mp"`` (model-parallel) mesh axis; ``None`` entries are replicated.

    Fix vs. scrambled original: the replicated axis was written as an undefined name
    instead of ``None`` (NameError at call time).
    """
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("""mp""", None)),
        (("transformer", "wte", "embedding"), P("""mp""", None)),
        # atention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, """mp""")),
        (("attention", "out_proj", "kernel"), P("""mp""", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, """mp""")),
        (("mlp", "c_fc", "bias"), P("""mp""")),
        (("mlp", "c_proj", "kernel"), P("""mp""", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def a ( in_dict ):
    """Map every leaf of ``in_dict`` (a nested param dict) to its PartitionSpec and
    return the result as a frozen nested dict.

    Fix vs. scrambled original: the initial dict was bound to a placeholder name, so the
    later ``initd`` reference was undefined.

    NOTE(review): ``_get_partition_rules``, ``_replacement_rules`` and ``_unmatched`` are
    referenced here but were scrambled to ``a``/``__lowerCamelCase`` in this file —
    pre-existing breakage, confirm the intended names.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    # Every parameter must have matched some rule; _unmatched leaking through means the
    # rule table is incomplete for this model.
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
| 96 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
# Tokenizer checkpoints compared in the parity tests below.
# NOTE(review): both constants were scrambled to the same name `a__`, so the second
# assignment shadows the first; upstream these are `TOKENIZER_CHECKPOINTS` and
# `TINY_MODEL_CHECKPOINT` — confirm before use.
a__ = ["""gpt2"""]
# Checkpoint used to build the tiny LM-head model in the SavedModel test.
a__ = """gpt2"""
if is_tf_available():
class _lowerCAmelCase ( tf.Module ):
"""simple docstring"""
def __init__( self : List[Any] , UpperCamelCase__ : int):
'''simple docstring'''
super().__init__()
snake_case__ = tokenizer
snake_case__ = AutoConfig.from_pretrained(UpperCamelCase__)
snake_case__ = TFGPTaLMHeadModel.from_config(UpperCamelCase__)
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text"""),))
def __magic_name__ ( self : Tuple , UpperCamelCase__ : int):
'''simple docstring'''
snake_case__ = self.tokenizer(UpperCamelCase__)
snake_case__ = tokenized["""input_ids"""].to_tensor()
snake_case__ = tf.cast(input_ids_dense > 0 , tf.intaa)
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
snake_case__ = self.model(input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__)["""logits"""]
return outputs
@require_tf
@require_keras_nlp
class _lowerCAmelCase ( unittest.TestCase ):
    """Parity tests between the slow GPTaTokenizer and the in-graph TFGPTaTokenizer.

    Fix vs. scrambled original: every method was named ``__magic_name__`` (so each def
    shadowed the previous one, ``setUp`` never ran and unittest discovered no tests) and
    locals were bound to placeholder names, making the bodies NameError at runtime.
    """

    def setUp(self):
        """Load the slow and in-graph tokenizers for every checkpoint and prepare data."""
        super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            """This is a straightforward English test sentence.""",
            """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
            """Now we're going to add some Chinese: 一 二 三 一二三""",
            """And some much more rare Chinese: 齉 堃 齉堃""",
            """Je vais aussi écrire en français pour tester les accents""",
            """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        """Slow and in-graph tokenizers must agree on every key for every sentence."""
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="""tf""")
                tf_outputs = tf_tokenizer([test_inputs])
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        """Compiling the tokenizer with tf.function must not change its outputs."""
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        """Round-trip through tf.saved_model must reproduce the serving outputs."""
        for tf_tokenizer in self.tf_tokenizers:
            # NOTE(review): `ModelToSave` is the upstream name of the tf.Module defined
            # above (scrambled to `_lowerCAmelCase` in this file) — confirm.
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / """saved.model"""
                tf.saved_model.save(model, save_path, signatures={"""serving_default""": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["""serving_default"""](test_inputs)["""output_0"""]
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        """A tokenizer rebuilt from get_config() must produce identical outputs."""
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        """max_length tokenization must pad/truncate to exactly max_length tokens."""
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123
            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out["""input_ids"""].numpy().shape[1]
                assert out_length == max_length
| 654 | 0 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
# NOTE(review): every module-level binding in this script was scrambled to `__a`, so
# later references such as `absl_logger`, `parser`, `args`, `tokenizer`, `engine_name`,
# `STRICT_TYPES`, `INPUT_SHAPE`, `input_names` and `engine` are unresolved — the
# intended distinct names must be restored before this script can run.
__a = trt.Logger(trt.Logger.WARNING)
__a = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__a = logging.getLogger(__name__)
# Command-line interface for the TensorRT SQuAD evaluation run.
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
    '--onnx_model_path',
    default=None,
    type=str,
    required=True,
    help='Path to ONNX model: ',
)
parser.add_argument(
    '--output_dir',
    default=None,
    type=str,
    required=True,
    help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
    '--tokenizer_name',
    default='',
    type=str,
    required=True,
    help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
    '--version_2_with_negative',
    action='store_true',
    help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
    '--null_score_diff_threshold',
    type=float,
    default=0.0,
    help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
    '--max_seq_length',
    default=3_8_4,
    type=int,
    help=(
        'The maximum total input sequence length after WordPiece tokenization. Sequences '
        'longer than this will be truncated, and sequences shorter than this will be padded.'
    ),
)
parser.add_argument(
    '--doc_stride',
    default=1_2_8,
    type=int,
    help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
    '--n_best_size',
    default=2_0,
    type=int,
    help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
    '--max_answer_length',
    default=3_0,
    type=int,
    help=(
        'The maximum length of an answer that can be generated. This is needed because the start '
        'and end predictions are not conditioned on one another.'
    ),
)
parser.add_argument('--seed', type=int, default=4_2, help='random seed for initialization')
parser.add_argument(
    '--dataset_name',
    type=str,
    default=None,
    required=True,
    help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--dataset_config_name',
    type=str,
    default=None,
    help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
    '--fp16',
    action='store_true',
    help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
    '--int8',
    action='store_true',
    help='Whether to use INT8',
)
__a = parser.parse_args()
if args.tokenizer_name:
    __a = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
        'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
    )
logger.info('Training/evaluation parameters %s', args)
__a = args.per_device_eval_batch_size
# Static (batch, seq_len) shape used for every TensorRT binding below.
__a = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__a = True
__a = 'temp_engine/bert-fp32.engine'
# NOTE(review): `args.fpaa` / `args.inta` look like scrambles of `args.fp16` / `args.int8`
# (the flags registered above) — confirm.
if args.fpaa:
    __a = 'temp_engine/bert-fp16.engine'
if args.inta:
    __a = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
    os.makedirs('temp_engine')
__a = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
# Parse the ONNX graph and build a serialized TensorRT engine from it.
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, 'rb') as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))
    # Query input names and shapes from parsed TensorRT network
    __a = [network.get_input(i) for i in range(network.num_inputs)]
    __a = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        # 1 PiB workspace cap (1 << 50 bytes).
        __a = 1 << 5_0
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fpaa:
            config.set_flag(trt.BuilderFlag.FPaa)
        if args.inta:
            config.set_flag(trt.BuilderFlag.INTa)
        __a = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        # Same min/opt/max shape: the engine is built for one fixed input shape.
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        __a = builder.build_engine(network, config)
        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, 'wb') as f:
            f.write(engine.serialize())
def a ( inputs , context , d_inputs , h_output0 , h_output1 , d_inputs_stream=None , d_output0=None , d_output1=None , stream=None ):
    '''Run one TensorRT inference step and return ``(outputs, infer_time)``.

    Copies the three input arrays host->device, executes the engine asynchronously,
    copies the two logit buffers device->host, and synchronizes the stream.

    Fix vs. scrambled original: all eight parameters shared one name (SyntaxError) and
    locals were bound to placeholders, so ``input_ids``/``start_time`` etc. were
    undefined. NOTE(review): the upstream signature is
    ``(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)``;
    positional call order at the call site below must be confirmed.
    '''
    # Backward-compatible shim: upstream has exactly 8 positional parameters.
    if stream is None:
        d_output0, d_output1, stream = d_inputs_stream, d_output0, d_output1
    input_ids = np.asarray(inputs['''input_ids'''], dtype=np.int32)
    attention_mask = np.asarray(inputs['''attention_mask'''], dtype=np.int32)
    token_type_ids = np.asarray(inputs['''token_type_ids'''], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
# NOTE(review): as above, every binding here was scrambled to `__a`; references like
# `accelerator`, `raw_datasets`, `column_names` and `pad_on_right` are unresolved.
__a = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
    datefmt='%m/%d/%Y %H:%M:%S',
    level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
    set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    __a = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
__a = raw_datasets['validation'].column_names
# Resolve the question/context/answers columns, falling back to positional columns.
__a = 'question' if 'question' in column_names else column_names[0]
__a = 'context' if 'context' in column_names else column_names[1]
__a = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__a = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )
__a = min(args.max_seq_length, tokenizer.model_max_length)
def a ( examples ):
    '''Tokenize validation examples into overlapping features for extractive QA.

    Long contexts are split into several overlapping features (stride windows); each
    feature records its originating example id and an offset mapping restricted to
    context tokens (non-context offsets set to None) for later answer extraction.

    Fix vs. scrambled original: the lstripped questions, the example-id list and the
    filtered offset mappings were each assigned to a placeholder name instead of being
    stored back, so the function had no effect and raised NameErrors.
    '''
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation='''only_second''' if pad_on_right else '''only_first''',
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding='''max_length''',
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop('''overflow_to_sample_mapping''')
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples['''input_ids'''])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples['''id'''][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples['''offset_mapping'''][i])
        ]
    return tokenized_examples
# NOTE(review): bindings scrambled to `__a`; `eval_examples`, `eval_dataset`,
# `data_collator` and `eval_dataset_for_model` referenced below are unresolved.
__a = raw_datasets['validation']
# Validation Feature Creation
__a = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)
__a = default_data_collator
# The model must not see bookkeeping columns; they are kept on `eval_dataset` for
# post-processing only.
__a = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
__a = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def a ( examples , features , predictions , stage='''eval''' ):
    '''Convert raw start/end logits into SQuAD-format predictions and references.

    Returns an ``EvalPrediction`` pairing the formatted predictions with the gold
    answers so the squad/squad_v2 metric can consume them.

    Fix vs. scrambled original: the signature repeated one parameter name (SyntaxError)
    and locals were placeholders, so ``predictions``/``formatted_predictions``/
    ``references`` were undefined.
    '''
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()]
    references = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
# NOTE(review): bindings scrambled to `__a`/`__a , __a`; `metric`, `d_inputs`,
# `h_outputa`, `d_outputa`, `stream`, `total_time`, `niter`, `start_time`, `all_preds`
# and friends referenced below are unresolved until the distinct names are restored.
__a = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inferrence
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified
    def a ( snake_case__: Dict ):
        '''Return the byte size of one engine binding buffer (volume * dtype itemsize).'''
        return trt.volume(engine.get_binding_shape(snake_case__ ) ) * engine.get_binding_dtype(snake_case__ ).itemsize
    # Allocate device memory for inputs and outputs.
    __a = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffer
    __a = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
    __a = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
    __a = cuda.mem_alloc(h_outputa.nbytes)
    __a = cuda.mem_alloc(h_outputa.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    __a = cuda.Stream()
    # Evaluation
    logger.info('***** Running Evaluation *****')
    logger.info(f" Num examples = {len(eval_dataset)}")
    logger.info(f" Batch size = {args.per_device_eval_batch_size}")
    __a = 0.0
    __a = 0
    __a = timeit.default_timer()
    __a = None
    for step, batch in enumerate(eval_dataloader):
        __a , __a = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
        total_time += infer_time
        niter += 1
        __a , __a = outputs
        __a = torch.tensor(start_logits)
        __a = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        __a = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_0_0)
        __a = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_0_0)
        __a = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        __a = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_0_0)
    if all_preds is not None:
        __a = nested_truncate(all_preds, len(eval_dataset))
    __a = timeit.default_timer() - start_time
    logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0 / niter))
    logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0))
    logger.info('Total Number of Inference = %d', niter)
__a = post_processing_function(eval_examples, eval_dataset, all_preds)
__a = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}")
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( lowercase_ ):
    """Test suite for IPNDMScheduler, driven by the shared SchedulerCommonTest harness."""

    # Scheduler classes exercised by the common harness.
    # NOTE(review): both class attributes were scrambled to the same name `_lowercase`,
    # so the second assignment shadows the first; upstream these are `scheduler_classes`
    # and `forward_default_kwargs` — confirm before relying on them.
    _lowercase : int = (IPNDMScheduler,)
    # Default (name, value) kwargs forwarded to scheduler.step in the checks below.
    _lowercase : int = (('''num_inference_steps''', 50),)
def __magic_name__ ( self : Any , **UpperCamelCase__ : Tuple):
'''simple docstring'''
snake_case__ = {"""num_train_timesteps""": 1_0_0_0}
config.update(**UpperCamelCase__)
return config
def __magic_name__ ( self : int , UpperCamelCase__ : Dict=0 , **UpperCamelCase__ : int):
'''simple docstring'''
snake_case__ = dict(self.forward_default_kwargs)
snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
snake_case__ = self.dummy_sample
snake_case__ = 0.1 * sample
snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
snake_case__ = self.get_scheduler_config(**UpperCamelCase__)
snake_case__ = scheduler_class(**UpperCamelCase__)
scheduler.set_timesteps(UpperCamelCase__)
# copy over dummy past residuals
snake_case__ = dummy_past_residuals[:]
if time_step is None:
snake_case__ = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__)
snake_case__ = scheduler_class.from_pretrained(UpperCamelCase__)
new_scheduler.set_timesteps(UpperCamelCase__)
# copy over dummy past residuals
snake_case__ = dummy_past_residuals[:]
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __magic_name__ ( self : List[Any]):
'''simple docstring'''
pass
def __magic_name__ ( self : Tuple , UpperCamelCase__ : Union[str, Any]=0 , **UpperCamelCase__ : Tuple):
'''simple docstring'''
snake_case__ = dict(self.forward_default_kwargs)
snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
snake_case__ = self.dummy_sample
snake_case__ = 0.1 * sample
snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**UpperCamelCase__)
scheduler.set_timesteps(UpperCamelCase__)
# copy over dummy past residuals (must be after setting timesteps)
snake_case__ = dummy_past_residuals[:]
if time_step is None:
snake_case__ = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__)
snake_case__ = scheduler_class.from_pretrained(UpperCamelCase__)
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase__)
# copy over dummy past residual (must be after setting timesteps)
snake_case__ = dummy_past_residuals[:]
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __magic_name__ ( self : Union[str, Any] , **UpperCamelCase__ : Dict):
'''simple docstring'''
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config(**UpperCamelCase__)
snake_case__ = scheduler_class(**UpperCamelCase__)
snake_case__ = 1_0
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__)
for i, t in enumerate(scheduler.timesteps):
snake_case__ = model(UpperCamelCase__ , UpperCamelCase__)
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__).prev_sample
for i, t in enumerate(scheduler.timesteps):
snake_case__ = model(UpperCamelCase__ , UpperCamelCase__)
snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__).prev_sample
return sample
def test_step_shape(self):
    """Check that ``scheduler.step`` preserves the input sample's shape.

    Restored from mangled identifiers (all locals collapsed into one name).
    ``step`` is invoked twice per timestep pair on purpose: the scheduler
    accumulates residual history, so the second round exercises its stateful
    multi-step path.
    """
    kwargs = dict(self.forward_default_kwargs)
    num_inference_steps = kwargs.pop("num_inference_steps", None)

    for scheduler_class in self.scheduler_classes:
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        sample = self.dummy_sample
        residual = 0.1 * sample

        if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
            scheduler.set_timesteps(num_inference_steps)
        elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
            scheduler.num_inference_steps = num_inference_steps

        # copy over dummy past residuals (must be done after set_timesteps)
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        scheduler.ets = dummy_past_residuals[:]

        time_step_0 = scheduler.timesteps[5]
        time_step_1 = scheduler.timesteps[6]

        output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
        output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

        # Second pass: the scheduler now has accumulated state.
        output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
        output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)
def test_timesteps(self):
    """Sweep ``num_train_timesteps`` through the config-equivalence check.

    Fix: the loop body referenced an undefined mangled name instead of the
    loop variable, which raised ``NameError``.
    """
    for timesteps in [100, 1000]:
        self.check_over_configs(num_train_timesteps=timesteps, time_step=None)
def test_inference_steps(self):
    """Sweep ``num_inference_steps`` through the forward-equivalence check.

    Fix: the call referenced an undefined mangled name instead of the loop
    variable, which raised ``NameError``.
    """
    for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
        self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)
def test_full_loop_no_noise(self):
    """Regression-check the mean absolute value of the final denoised sample.

    Restored from mangled identifiers; calls the ``full_loop`` helper defined
    above in this class.
    """
    sample = self.full_loop()
    result_mean = torch.mean(torch.abs(sample))
    # Reference value recorded by the original test suite; a loose tolerance
    # absorbs platform-level float differences.
    assert abs(result_mean.item() - 2540529) < 10
| 654 | 0 |
'''Lazy import structure for the PoolFormer model package.'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Maps each submodule to the public names it provides; `_LazyModule` imports a
# submodule only when one of those names is first accessed.
# Fix: this dict was previously bound to a mangled throwaway name while the
# `_LazyModule(...)` call below referenced the undefined `_import_structure`,
# and the optional-dependency branches discarded their entries entirely.
_import_structure = {
    'configuration_poolformer': [
        'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'PoolFormerConfig',
        'PoolFormerOnnxConfig',
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_poolformer'] = [
        'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PoolFormerForImageClassification',
        'PoolFormerModel',
        'PoolFormerPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy
    # submodules are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 98 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _lowerCAmelCase ( PipelineTool ):
    """Tool that produces a binary segmentation mask of an image for a text label.

    Fixes: the base class was an undefined mangled name (``PipelineTool`` is
    imported above), ``encode``'s two parameters shared one name (a
    SyntaxError), all three pipeline hooks shared one method name so only the
    last survived, the binarization in ``decode`` was lost, and ``np.uinta``
    is not a NumPy dtype (``np.uint8`` is).
    """

    description = (
        '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
        '''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
    )
    default_checkpoint = '''CIDAS/clipseg-rd64-refined'''
    name = '''image_segmenter'''
    model_class = CLIPSegForImageSegmentation

    inputs = ['''image''', '''text''']
    outputs = ['''image''']

    def __init__(self, *args, **kwargs):
        # Fail early with a clear error if PIL/vision deps are missing.
        requires_backends(self, ["""vision"""])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        """Build model inputs from the raw image and the text label."""
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="""pt""")

    def forward(self, inputs):
        """Run the segmentation model without tracking gradients."""
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        """Binarize the logits and render them as a grayscale PIL image mask."""
        array = outputs.cpu().detach().numpy()
        # Positive logits -> mask (1), non-positive -> background (0).
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
| 654 | 0 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def a():
    """Parse command-line hyper-parameters for complexity fine-tuning.

    Fix: every ``type=`` argument referenced an undefined mangled name, which
    raised ``NameError`` at runtime; the concrete types are restored from each
    option's default value.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("""--model_ckpt""", type=str, default="""microsoft/unixcoder-base-nine""")
    parser.add_argument("""--num_epochs""", type=int, default=5)
    parser.add_argument("""--batch_size""", type=int, default=6)
    parser.add_argument("""--gradient_accumulation_steps""", type=int, default=1)
    # NOTE: argparse's `type=bool` treats ANY non-empty string as True; kept
    # for compatibility with the original flag semantics.
    parser.add_argument("""--freeze""", type=bool, default=True)
    parser.add_argument("""--learning_rate""", type=float, default=5E-4)
    parser.add_argument("""--seed""", type=int, default=0)
    parser.add_argument("""--lr_scheduler_type""", type=str, default="""cosine""")
    parser.add_argument("""--num_warmup_steps""", type=int, default=10)
    parser.add_argument("""--weight_decay""", type=float, default=0.0_1)
    parser.add_argument("""--output_dir""", type=str, default="""./results""")
    return parser.parse_args()


# Canonical alias: the training entry point in this file calls `get_args()`.
get_args = a
# Accuracy metric shared by the Trainer's evaluation loop.
# Fix: the function below reads the global `metric`, but the metric object was
# previously bound only to a mangled name, raising NameError on first use.
metric = load('accuracy')
# Backward-compatible alias for the original (mangled) global name.
SCREAMING_SNAKE_CASE = metric


def a(eval_pred):
    """Compute accuracy from a ``(logits, labels)`` evaluation pair."""
    predictions, labels = eval_pred
    # Logits -> predicted class ids.
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


# Canonical alias: the Trainer is built with `compute_metrics=compute_metrics`.
compute_metrics = a
class __UpperCAmelCase ( TrainerCallback ):
    """Callback that also evaluates on the training split whenever evaluation runs.

    Fixes: the base class was an undefined mangled name (``TrainerCallback`` is
    imported above), the trainer was never stored on ``self`` although
    ``self._trainer`` is read below, the hook had a mangled name the Trainer
    would never invoke, and ``control_copy`` was returned without being bound.
    """

    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            # Preserve the control flags: the nested evaluate() call mutates
            # them, and we must hand the original decision back to the Trainer.
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="""train""")
            return control_copy


# Canonical alias for readability at the call site.
CustomCallback = __UpperCAmelCase
def a():
    """Fine-tune a sequence-classification model on codeparrot/codecomplex.

    Restored from mangled identifiers: tokenizer/model configuration
    assignments, the freeze loop, the ``tokenize`` closure and the Trainer
    wiring all bound their results to throwaway names.

    NOTE(review): `get_args` and `compute_metrics` refer to the argument
    parser and metric helpers defined earlier in this file, whose own names
    were mangled — restore their aliases accordingly.
    """
    args = get_args()
    set_seed(args.seed)

    # 80/10/10 style split: 20% held out, then halved into test/valid.
    dataset = load_dataset("""codeparrot/codecomplex""", split="""train""")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["""test"""].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            """train""": train_test["""train"""],
            """test""": test_validation["""train"""],
            """valid""": test_validation["""test"""],
        }
    )

    print("""Loading tokenizer and model""")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        # Freeze the encoder; only the classification head is trained.
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["""train"""]["""complexity"""])))

    def tokenize(example):
        # Map source code to model inputs and the complexity class id.
        inputs = tokenizer(example["""src"""], truncation=True, max_length=1_024)
        label = labels.str2int(example["""complexity"""])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["""train"""].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="""epoch""",
        save_strategy="""epoch""",
        logging_strategy="""epoch""",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.0_1,
        metric_for_best_model="""accuracy""",
        run_name="""complexity-java""",
        report_to="""wandb""",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["""train"""],
        eval_dataset=tokenized_datasets["""valid"""],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("""Training...""")
    # `__UpperCAmelCase` is the CustomCallback class defined above in this file.
    trainer.add_callback(__UpperCAmelCase(trainer))
    trainer.train()


# Canonical alias so the entry-point guard below resolves.
main = a


if __name__ == "__main__":
    main()
| 99 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=7 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Dict=1_8 , UpperCamelCase__ : Any=3_0 , UpperCamelCase__ : List[Any]=4_0_0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Any=None , UpperCamelCase__ : Optional[int]=True , ):
'''simple docstring'''
snake_case__ = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = num_channels
snake_case__ = image_size
snake_case__ = min_resolution
snake_case__ = max_resolution
snake_case__ = do_resize
snake_case__ = size
snake_case__ = apply_ocr
def __magic_name__ ( self : Optional[Any]):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCAmelCase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """Tests for the LayoutLMv3 image processor.

    Fixes: the mixin base was an undefined mangled name
    (``ImageProcessingSavingTestMixin`` is imported above), the tester was
    never stored on ``self`` although ``self.image_processor_tester`` is read
    throughout, all locals were collapsed into throwaway names (NameErrors),
    and the mangled method names were invisible to unittest discovery.
    """

    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        # NOTE(review): `LayoutLMvaImageProcessingTester` is the intended name
        # of the fixture helper class defined above in this file; its own name
        # was mangled — confirm/restore it alongside this change.
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, """do_resize"""))
        self.assertTrue(hasattr(image_processing, """size"""))
        self.assertTrue(hasattr(image_processing, """apply_ocr"""))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"""height""": 1_8, """width""": 1_8})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=4_2)
        self.assertEqual(image_processor.size, {"""height""": 4_2, """width""": 4_2})

    def test_batch_feature(self):
        # Intentionally empty: batching behaviour is covered by the __call__
        # tests below. (Name presumed from the saving-mixin convention.)
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="""pt""")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ),
        )

    def test_LayoutLMv3_integration_test(self):
        """End-to-end OCR check against a fixture document image."""
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("""hf-internal-testing/fixtures_docvqa""", split="""test""")

        image = Image.open(ds[0]["""file"""]).convert("""RGB""")

        encoding = image_processing(image, return_tensors="""pt""")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_2_4, 2_2_4))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
        expected_boxes = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="""pt""")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_2_4, 2_2_4))
| 654 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module-level logger.
# NOTE(review): both assignments below share the mangled name `_A`, so the
# second overwrites the first (the logger binding is lost) — restore distinct
# names (e.g. `logger` and `FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP`).
_A : Any = logging.get_logger(__name__)
# Maps canonical checkpoint ids to their hosted configuration files.
_A : Union[str, Any] = {
    """microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class __snake_case ( BackboneConfigMixin , PretrainedConfig ):
    """Configuration class for FocalNet models.

    Fixes: the original declaration listed the same (mangled) base class
    twice, which raises ``TypeError: duplicate base class``, and every
    ``__init__`` parameter shared one mangled name, which is a SyntaxError.
    Parameter names are restored from the attribute assignments in the body;
    the ``model_type`` class attribute keeps its ``"focalnet"`` value.
    """

    model_type = """focalnet"""

    def __init__(
        self,
        image_size=2_24,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[1_92, 3_84, 7_68, 7_68],  # mutable defaults kept as in the original signature
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1E-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # Backbone bookkeeping: stage names plus the aligned output feature spec.
        self.stage_names = ['''stem'''] + [f'''stage{idx}''' for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 100 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
def __init__( self : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]):
'''simple docstring'''
snake_case__ = params
snake_case__ = np.array(UpperCamelCase__)
snake_case__ = np.array([len(UpperCamelCase__) for t in data])
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : Dict , UpperCamelCase__ : Any):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self : Union[str, Any]):
'''simple docstring'''
return len(self.lengths)
def __magic_name__ ( self : str):
'''simple docstring'''
assert len(self.token_ids) == len(self.lengths)
assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
def __magic_name__ ( self : Optional[int]):
'''simple docstring'''
snake_case__ = self.params.max_model_input_size
snake_case__ = self.lengths > max_len
logger.info(F'''Splitting {sum(UpperCamelCase__)} too long sequences.''')
def divide_chunks(UpperCamelCase__ : str , UpperCamelCase__ : Tuple):
return [l[i : i + n] for i in range(0 , len(UpperCamelCase__) , UpperCamelCase__)]
snake_case__ = []
snake_case__ = []
if self.params.mlm:
snake_case__ , snake_case__ = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
snake_case__ , snake_case__ = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_)
new_lengths.append(len_)
else:
snake_case__ = []
for sub_s in divide_chunks(seq_ , max_len - 2):
if sub_s[0] != cls_id:
snake_case__ = np.insert(UpperCamelCase__ , 0 , UpperCamelCase__)
if sub_s[-1] != sep_id:
snake_case__ = np.insert(UpperCamelCase__ , len(UpperCamelCase__) , UpperCamelCase__)
assert len(UpperCamelCase__) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(UpperCamelCase__)
new_tok_ids.extend(UpperCamelCase__)
new_lengths.extend([len(UpperCamelCase__) for l in sub_seqs])
snake_case__ = np.array(UpperCamelCase__)
snake_case__ = np.array(UpperCamelCase__)
def __magic_name__ ( self : Any):
'''simple docstring'''
snake_case__ = len(self)
snake_case__ = self.lengths > 1_1
snake_case__ = self.token_ids[indices]
snake_case__ = self.lengths[indices]
snake_case__ = len(self)
logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''')
def __magic_name__ ( self : List[str]):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
snake_case__ = self.params.special_tok_ids["""unk_token"""]
snake_case__ = len(self)
snake_case__ = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
snake_case__ = (unk_occs / self.lengths) < 0.5
snake_case__ = self.token_ids[indices]
snake_case__ = self.lengths[indices]
snake_case__ = len(self)
logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''')
def __magic_name__ ( self : Optional[Any]):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(F'''{len(self)} sequences''')
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __magic_name__ ( self : int , UpperCamelCase__ : Optional[int]):
'''simple docstring'''
snake_case__ = [t[0] for t in batch]
snake_case__ = [t[1] for t in batch]
assert len(UpperCamelCase__) == len(UpperCamelCase__)
# Max for paddings
snake_case__ = max(UpperCamelCase__)
# Pad token ids
if self.params.mlm:
snake_case__ = self.params.special_tok_ids["""pad_token"""]
else:
snake_case__ = self.params.special_tok_ids["""unk_token"""]
snake_case__ = [list(t.astype(UpperCamelCase__)) + [pad_idx] * (max_seq_len_ - len(UpperCamelCase__)) for t in token_ids]
assert len(tk_) == len(UpperCamelCase__)
assert all(len(UpperCamelCase__) == max_seq_len_ for t in tk_)
snake_case__ = torch.tensor(tk_) # (bs, max_seq_len_)
snake_case__ = torch.tensor(UpperCamelCase__) # (bs)
return tk_t, lg_t
| 654 | 0 |
def a__(grid, row, col, visit):
    """Count simple paths from (row, col) to the bottom-right cell of a binary grid.

    Moves in the four cardinal directions, never revisiting a cell on the
    current path and never entering cells containing 1. ``visit`` is the set
    of (row, col) cells already on the current path; pass an empty set at the
    top-level call.

    Fixes: all four parameters shared one mangled name (a SyntaxError) and the
    recursive calls referenced the undefined name ``depth_first_search``.
    """
    row_length, col_length = len(grid), len(grid[0])
    # Out of bounds, already on the current path, or blocked cell.
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += a__(grid, row + 1, col, visit)
    count += a__(grid, row - 1, col, visit)
    count += a__(grid, row, col + 1, visit)
    count += a__(grid, row, col - 1, visit)

    # Backtrack so sibling branches may reuse this cell.
    visit.remove((row, col))
    return count
if __name__ == "__main__":
    # Run any doctest examples embedded in this module's docstrings.
    # NOTE(review): no doctest examples are currently present above, so this
    # is a no-op until examples are added.
    import doctest

    doctest.testmod()
| 101 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    """Map a key from the original YOSO checkpoint to its Hugging Face name.

    The checks are order-dependent: e.g. norm1/norm2 are rewritten before the
    generic "norm" case, and mha.attn before mha.
    """
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rename every checkpoint key in place, dropping pooler/classifier heads
    and adding the tied prediction bias and position_ids buffers.

    Returns the mutated state dict.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    # position ids start at 2 (offset for the two special positions)
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    """Load an original YOSO checkpoint, convert it and save as a HF model."""
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")


# NOTE(review): the obfuscated source defined all three functions under the
# single name `_UpperCAmelCase`; keep a backward-compatible alias for the
# name that survived (the last definition).
_UpperCAmelCase = convert_yoso_checkpoint
if __name__ == "__main__":
    # CLI entry point for converting an original YOSO checkpoint.
    # BUG FIX: the original assigned the parser/args to `a__` while the body
    # referenced the undefined names `parser` and `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 654 | 0 |
"""simple docstring"""
def UpperCamelCase(SCREAMING_SNAKE_CASE):
    """Return the longest palindromic substring of the input string.

    Implements Manacher's algorithm: the string is interleaved with "|" so
    that even- and odd-length palindromes are handled uniformly, then the
    palindromic radius at each center is computed in overall O(n).

    Args:
        SCREAMING_SNAKE_CASE: input string (may be empty).

    Returns:
        The longest palindromic substring ("" for empty input).
    """
    input_string = SCREAMING_SNAKE_CASE
    # Robustness: the original crashed on "" (input_string[-1]).
    if not input_string:
        return ""

    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    # BUG FIX: the original sized/iterated these by len(input_string); the
    # algorithm runs over the interleaved string of length 2n-1.
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 102 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( lowercase_ ):
    """Read-only fsspec filesystem exposing one decompressed file.

    Subclasses set ``protocol``, ``compression`` and ``extension``.
    NOTE(review): the base-class reference ``lowercase_`` looks mangled by
    obfuscation — confirm it should be fsspec's AbstractArchiveFileSystem.
    """

    root_marker = ""
    # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    protocol: str = None
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol=None, target_options=None, **kwargs):
        """Open the (possibly remote) compressed file *fo* for reading."""
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        # BUG FIX: the obfuscated source dropped the `self.file`/`self.*`
        # assignment targets and the keyword parameter names fsspec relies on.
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        """Populate the single-entry directory cache lazily."""
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        """Return the whole decompressed content as bytes."""
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        """Open the decompressed stream; only binary read mode is supported."""
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class _lowerCAmelCase ( lowercase_ ):
    """Filesystem over a single bzip2-compressed file."""

    # BUG FIX: the obfuscated source named all three attributes `_lowercase`,
    # so only the last survived; restore the fsspec contract names.
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"
class _lowerCAmelCase ( lowercase_ ):
    """Filesystem over a single gzip-compressed file."""

    # BUG FIX: restore the three distinct fsspec attribute names collapsed
    # onto `_lowercase` by obfuscation.
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"
class _lowerCAmelCase ( lowercase_ ):
    """Filesystem over a single LZ4-compressed file."""

    # BUG FIX: restore the three distinct fsspec attribute names collapsed
    # onto `_lowercase` by obfuscation.
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"
class _lowerCAmelCase ( lowercase_ ):
    """Filesystem over a single XZ-compressed file."""

    # BUG FIX: restore the three distinct fsspec attribute names collapsed
    # onto `_lowercase` by obfuscation.
    protocol = "xz"
    compression = "xz"
    extension = ".xz"
class _lowerCAmelCase ( lowercase_ ):
    """Filesystem over a single Zstandard-compressed file."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str = "",
        mode: str = "rb",
        target_protocol=None,
        target_options=None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        # BUG FIX: the obfuscated source left the wrapper class/function at
        # module level, named the iterator method `__magic_name__` instead of
        # `__next__`, and never assigned the patched `__enter__`.
        _enter = self.file.__enter__

        class WrappedFile:
            """Proxy that delegates everything to the wrapped file object."""

            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
| 654 | 0 |
"""simple docstring"""
from maths.prime_factors import prime_factors
def snake_case(lowerCAmelCase_) -> int:
    """Return -1 if the input has an odd number of prime factors (with
    multiplicity), else 1.

    Args:
        lowerCAmelCase_: the integer to inspect.

    Raises:
        TypeError: if the input is not an int.
        ValueError: if the input is < 1.
    """
    number = lowerCAmelCase_
    # BUG FIX: the original tested isinstance(number, number) and its error
    # message referenced an undefined name.
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 103 |
def _UpperCAmelCase ( a : int ):
if number < 0:
raise ValueError("""number must not be negative""" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 654 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    """Helper building image-processor kwargs and random image batches.

    BUG FIX: the obfuscated source lost the `self.*` assignment targets, gave
    both helper methods the same name, and dropped the keyword parameter
    names that the test classes below rely on.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Create a batch of random images as PIL images, numpy arrays or torch tensors."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs


# Backward-compatible binding of the obfuscated class name.
UpperCamelCase__ = ChineseCLIPImageProcessingTester
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ChineseCLIPImageProcessor on 3-channel inputs.

    BUG FIX: the obfuscated source named every method `snake_case__` (so only
    the last survived) and used undefined names as argument values; method
    names and values are restored from the bodies' own references.
    """

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ChineseCLIPImageProcessor on 4-channel inputs (converted to RGB).

    BUG FIX: method names restored from the colliding obfuscated `snake_case__`
    definitions; `self.*` assignment targets restored in setUp.
    """

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        # RGB conversion collapses the 4 input channels to 3
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 104 |
class _lowerCAmelCase :
    """Max Fenwick (binary indexed) tree over `arr` with `tree` as summary array.

    NOTE(review): this block is heavily mangled by obfuscation — every method
    below is named `__magic_name__`, so only the last definition (the range-max
    query) survives on the class, and most assignments have lost their
    left-hand targets (they all bind the throwaway local `snake_case__` and
    reference undefined names such as `size`, `index`, `value`, `left`,
    `right`). The intended methods appear to be get_next/get_prev (bit tricks),
    update, and a max query over [left, right); confirm against the original
    implementation before relying on this class.
    """

    def __init__( self : List[Any] , UpperCamelCase__ : int):
        '''Allocate the value array and the summary tree.

        NOTE(review): `size` is undefined here — presumably the UpperCamelCase__
        parameter, and the targets were presumably self.size/self.arr/self.tree.
        '''
        snake_case__ = size
        snake_case__ = [0] * size
        snake_case__ = [0] * size

    @staticmethod
    def __magic_name__ ( UpperCamelCase__ : int):
        '''Bit trick: next index whose summary range covers this one.

        NOTE(review): `index` is undefined — presumably the parameter.
        '''
        return index | (index + 1)

    @staticmethod
    def __magic_name__ ( UpperCamelCase__ : int):
        '''Bit trick: one before the left border of this index's summary range.

        NOTE(review): `index` is undefined — presumably the parameter.
        '''
        return (index & (index + 1)) - 1

    def __magic_name__ ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : int):
        '''Point update: set arr[index] = value and refresh summaries upward.

        NOTE(review): the else-branch `max(_, _, _)` lost its real arguments;
        cannot be reconstructed with confidence from this source.
        '''
        snake_case__ = value
        while index < self.size:
            snake_case__ = self.get_prev(UpperCamelCase__) + 1
            if current_left_border == index:
                snake_case__ = value
            else:
                snake_case__ = max(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
            snake_case__ = self.get_next(UpperCamelCase__)

    def __magic_name__ ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int):
        '''Return the maximum over the half-open range [left, right).'''
        right -= 1  # Because of right is exclusive
        snake_case__ = 0
        while left <= right:
            snake_case__ = self.get_prev(UpperCamelCase__)
            if left <= current_left:
                # the whole summary block [current_left+1, right] fits in range
                snake_case__ = max(UpperCamelCase__ , self.tree[right])
                snake_case__ = current_left
            else:
                # fall back to the raw value and step left one element
                snake_case__ = max(UpperCamelCase__ , self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 654 | 0 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)


class lowerCAmelCase_ ( CLIPImageProcessor ):
    """Deprecated alias of CLIPImageProcessor kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        """Emit a FutureWarning, then defer to CLIPImageProcessor.

        BUG FIX: the original declared `*snake_case__, **snake_case__`
        (duplicate argument name, a SyntaxError) and passed an undefined name
        as the warning category instead of FutureWarning; the base class
        reference is restored to the imported CLIPImageProcessor.
        """
        warnings.warn(
            'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use CLIPImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 105 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    """Builds small Pegasus configs/inputs and checks decoder caching.

    BUG FIX: the obfuscated __init__ declared every parameter as
    `UpperCamelCase__` (a SyntaxError) and dropped the `self.*` assignment
    targets; both methods shared one name. Restored from the bodies'
    references; the class is named as the sibling test class calls it.
    """

    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return a small config plus a dict of encoder/decoder inputs."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Assert that cached decoding matches uncached decoding on a slice."""
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past, output_from_past, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the full dict of Pegasus model inputs, deriving any mask not supplied.

    BUG FIX: the obfuscated signature declared several parameters under the
    same name `a` (a SyntaxError); the function name is restored from its
    caller in the model tester.
    """
    if attention_mask is None:
        # mask out padding positions
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # the first decoder position (decoder_start_token) is always attended
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for TFPegasus.

    BUG FIX: the obfuscated class listed the undefined base `lowercase_`
    twice (duplicate bases raise TypeError) — restored to the mixins imported
    at the top of the file — and collapsed all attribute/method names.
    """

    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
_lowercase : List[str] = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
_lowercase : str = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
_lowercase : int = '''google/pegasus-xsum'''
@cached_property
def __magic_name__ ( self : Dict):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.model_name)
@cached_property
def __magic_name__ ( self : int):
'''simple docstring'''
snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
return model
def __magic_name__ ( self : Dict , **UpperCamelCase__ : List[Any]):
'''simple docstring'''
snake_case__ = self.translate_src_text(**UpperCamelCase__)
assert self.expected_text == generated_words
def __magic_name__ ( self : str , **UpperCamelCase__ : List[Any]):
'''simple docstring'''
snake_case__ = self.tokenizer(self.src_text , **UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="""tf""")
snake_case__ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCamelCase__ , )
snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase__)
return generated_words
    @slow
    def __magic_name__ ( self : List[str]):
        """Slow integration test: run the batch-generation equality check end to end.

        NOTE(review): `_assert_generated_batch_equal_expected` is not defined under
        that name in this mangled class (all methods are `__magic_name__`).
        """
        self._assert_generated_batch_equal_expected()
| 654 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def lowerCamelCase_ ( lowerCAmelCase__ : str ) -> str:
    """Translate a GroupViT checkpoint parameter name into its HF Transformers name.

    Applies an ordered chain of substring replacements; later rules re-inspect
    the partially-renamed key, so the order below is significant (e.g. the
    "blocks" rule must not fire on "resblocks", hence the "res" guard).

    Args:
        lowerCAmelCase__: original parameter name from the GroupViT checkpoint.

    Returns:
        The renamed key; names matching no rule are returned unchanged.
    """
    # Fix for mangled original: the body referenced an undefined `name` and
    # discarded every `replace` result; bind the parameter and thread the
    # renamed key through each rule instead.
    name = lowerCAmelCase__
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace('img_encoder.pos_embed', 'vision_model.embeddings.position_embeddings')
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace('img_encoder.patch_embed.proj', 'vision_model.embeddings.patch_embeddings.projection')
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace('img_encoder.patch_embed.norm', 'vision_model.embeddings.layernorm')
    if "img_encoder.layers" in name:
        name = name.replace('img_encoder.layers', 'vision_model.encoder.stages')
    if "blocks" in name and "res" not in name:
        name = name.replace('blocks', 'layers')
    if "attn" in name and "pre_assign" not in name:
        name = name.replace('attn', 'self_attn')
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace('proj', 'out_proj')
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace('pre_assign_attn.attn.proj', 'pre_assign_attn.attn.out_proj')
    if "norm1" in name:
        name = name.replace('norm1', 'layer_norm1')
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace('norm2', 'layer_norm2')
    if "img_encoder.norm" in name:
        name = name.replace('img_encoder.norm', 'vision_model.layernorm')
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace('text_encoder.token_embedding', 'text_model.embeddings.token_embedding')
    if "text_encoder.positional_embedding" in name:
        name = name.replace('text_encoder.positional_embedding', 'text_model.embeddings.position_embedding.weight')
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace('text_encoder.transformer.resblocks.', 'text_model.encoder.layers.')
    if "ln_1" in name:
        name = name.replace('ln_1', 'layer_norm1')
    if "ln_2" in name:
        name = name.replace('ln_2', 'layer_norm2')
    if "c_fc" in name:
        name = name.replace('c_fc', 'fc1')
    if "c_proj" in name:
        name = name.replace('c_proj', 'fc2')
    if "text_encoder" in name:
        name = name.replace('text_encoder', 'text_model')
    if "ln_final" in name:
        name = name.replace('ln_final', 'final_layer_norm')
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace('img_projector.linear_hidden.', 'visual_projection.')
    if "img_projector.linear_out." in name:
        name = name.replace('img_projector.linear_out.', 'visual_projection.3.')
    if "text_projector.linear_hidden" in name:
        name = name.replace('text_projector.linear_hidden', 'text_projection')
    if "text_projector.linear_out" in name:
        name = name.replace('text_projector.linear_out', 'text_projection.3')
    return name
def lowerCamelCase_ ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any] ) -> int:
    """Convert a raw GroupViT state dict to the HF layout, splitting fused qkv/in_proj.

    NOTE(review): this block appears machine-mangled — the signature declares
    `lowerCAmelCase__` twice (a SyntaxError), every intermediate is bound to `A`
    (so the split q/k/v slices are computed and then discarded), and
    `orig_state_dict`, `key_split`, `config`, `val` and `new_name` are read
    without being bound. The original presumably assigned each slice back into
    `orig_state_dict` under renamed keys; confirm against the upstream
    conversion script before relying on this.
    """
    for key in orig_state_dict.copy().keys():
        A = orig_state_dict.pop(lowerCAmelCase__ )
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            A = key.split('.' )
            # stage index (key_split[2]) and layer index (key_split[4]) of this key
            A , A = int(key_split[2] ), int(key_split[4] )
            A = config.vision_config.hidden_size
            if "weight" in key:
                # q, k, v weight slices, in that order
                A = val[:dim, :]
                A = val[dim : dim * 2, :]
                A = val[-dim:, :]
            else:
                # q, k, v bias slices, in that order
                A = val[:dim]
                A = val[dim : dim * 2]
                A = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            A = key.split('.' )
            A = int(key_split[3] )
            A = config.text_config.hidden_size
            if "weight" in key:
                A = val[:dim, :]
                A = val[
                    dim : dim * 2, :
                ]
                A = val[-dim:, :]
            else:
                A = val[:dim]
                A = val[dim : dim * 2]
                A = val[-dim:]
        else:
            A = rename_key(lowerCAmelCase__ )
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                A = val.squeeze_()
            else:
                A = val
    return orig_state_dict
def lowerCamelCase_ ( ) -> "Image.Image":
    """Fetch the standard COCO verification image (two cats) used to sanity-check conversions.

    Fix for mangled original: the body referenced the undefined names
    `lowerCAmelCase__` and `im`; the URL is now bound locally and the opened
    image is returned.

    Returns:
        A PIL image downloaded from the COCO val2017 set.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowerCamelCase_ ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int]="groupvit-gcc-yfcc" , lowerCAmelCase__ : str=False ) -> Any:
    """Convert a GroupViT checkpoint to HF format, verify its logits, and save/push it.

    NOTE(review): machine-mangled — the signature declares `lowerCAmelCase__` four
    times (a SyntaxError), every result is bound to `A` while `model`,
    `missing_keys`, `unexpected_keys`, `model_name`, `processor`, `outputs` and
    `push_to_hub` are read without being bound. Confirm against the upstream
    conversion script before use.
    """
    A = GroupViTConfig()
    A = GroupViTModel(lowerCAmelCase__ ).eval()
    # load raw checkpoint and remap its keys to the HF layout
    A = torch.load(lowerCAmelCase__ , map_location='cpu' )['model']
    A = convert_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
    A , A = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCAmelCase__ ) == 0)
    # verify result
    A = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
    A = prepare_img()
    A = processor(text=['a photo of a cat', 'a photo of a dog'] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors='pt' )
    with torch.no_grad():
        A = model(**lowerCAmelCase__ )
    # per-checkpoint reference logits for the verification image
    if model_name == "groupvit-gcc-yfcc":
        A = torch.tensor([[13.3523, 6.3629]] )
    elif model_name == "groupvit-gcc-redcaps":
        A = torch.tensor([[16.1873, 8.6230]] )
    else:
        raise ValueError(F'''Model name {model_name} not supported.''' )
    assert torch.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1E-3 )
    processor.save_pretrained(lowerCAmelCase__ )
    model.save_pretrained(lowerCAmelCase__ )
    print('Successfully saved processor and model to' , lowerCAmelCase__ )
    if push_to_hub:
        print('Pushing to the hub...' )
        processor.push_to_hub(lowerCAmelCase__ , organization='nielsr' )
        model.push_to_hub(lowerCAmelCase__ , organization='nielsr' )
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    # Fixes for mangled original: the parser and parsed args were bound to
    # `__snake_case` while the code read `parser`/`args`, and the default
    # model name 'groupvit-gccy-fcc' was a typo the converter rejects with
    # ValueError (supported names are 'groupvit-gcc-yfcc' and
    # 'groupvit-gcc-redcaps').
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
    )
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
    parser.add_argument(
        '--model_name',
        default='groupvit-gcc-yfcc',
        type=str,
        help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
    )
    args = parser.parse_args()
    # NOTE(review): `convert_groupvit_checkpoint` is the original name of the
    # mangled converter above — confirm the binding when de-mangling the file.
    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
a__ = logging.get_logger(__name__)  # module logger

# NOTE(review): the constants below are all bound under the mangled name `a__`,
# so each assignment shadows the previous one; the tokenizer class further down
# references them as VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_LYRIC_TOKENS_SIZES, which are never defined here. Confirm the
# original names upstream.

# File names expected for each vocabulary component.
a__ = {
    """artists_file""": """artists.json""",
    """lyrics_file""": """lyrics.json""",
    """genres_file""": """genres.json""",
}

# Hub URLs of the pretrained vocabulary files.
a__ = {
    """artists_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
    },
    """genres_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
    },
    """lyrics_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
    },
}

# Maximum number of lyric tokens per checkpoint.
a__ = {
    """jukebox""": 5_1_2,
}
class _lowerCAmelCase ( lowercase_ ):
    """Tokenizer turning (artist, genre, lyrics) triples into Jukebox-style id sequences.

    NOTE(review): identifiers in this block look machine-mangled — every method is
    named `__magic_name__` (so later definitions shadow earlier ones and only the
    last binding survives on the class), most locals are bound to `snake_case__`
    while different names are read back, and several signatures declare the same
    parameter name twice (a SyntaxError). The documentation below describes the
    apparent intent; confirm every identifier against the upstream tokenizer.
    """

    # Tokenizer configuration, mirroring the module-level constants above
    # (which are themselves bound under the mangled name `a__`).
    _lowercase : str = VOCAB_FILES_NAMES
    _lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    _lowercase : str = PRETRAINED_LYRIC_TOKENS_SIZES
    _lowercase : Any = ['''input_ids''', '''attention_mask''']

    def __init__( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int=["v3", "v2", "v2"] , UpperCamelCase__ : List[str]=5_1_2 , UpperCamelCase__ : Union[str, Any]=5 , UpperCamelCase__ : List[Any]="<|endoftext|>" , **UpperCamelCase__ : List[Any] , ):
        """Load the artist/genre/lyrics vocabularies and build the decoder maps.

        NOTE(review): the parameter list repeats `UpperCamelCase__` (a SyntaxError)
        and uses a mutable list as a default; the body also reads `unk_token`,
        `version`, `max_n_lyric_tokens`, `n_genres` and `oov`, none of which are
        bound here.
        """
        snake_case__ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else unk_token
        super().__init__(
            unk_token=UpperCamelCase__ , n_genres=UpperCamelCase__ , version=UpperCamelCase__ , max_n_lyric_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
        snake_case__ = version
        snake_case__ = max_n_lyric_tokens
        snake_case__ = n_genres
        # load the three JSON vocabularies (artists, genres, lyric characters)
        with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
            snake_case__ = json.load(UpperCamelCase__)
        with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
            snake_case__ = json.load(UpperCamelCase__)
        with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
            snake_case__ = json.load(UpperCamelCase__)
        # regex matching characters that fall outside the lyrics vocabulary
        snake_case__ = R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 7_9:
            snake_case__ = oov.replace(R"""\-'""" , R"""\-+'""")
        snake_case__ = regex.compile(UpperCamelCase__)
        # reverse maps for id -> token decoding
        snake_case__ = {v: k for k, v in self.artists_encoder.items()}
        snake_case__ = {v: k for k, v in self.genres_encoder.items()}
        snake_case__ = {v: k for k, v in self.lyrics_encoder.items()}

    @property
    def __magic_name__ ( self : List[str]):
        """Total vocabulary size across artists, genres and lyric characters."""
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def __magic_name__ ( self : Union[str, Any]):
        """Return the combined vocabulary.

        NOTE(review): `dict()` accepts at most one positional argument, so this
        call raises TypeError at runtime; the original presumably merged or
        nested the three encoders explicitly.
        """
        return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder)

    def __magic_name__ ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int):
        """Map artist names, genre lists and lyric characters to ids.

        Genre lists are padded with -1 up to `self.n_genres`; only the first
        lyric sequence is converted. NOTE(review): duplicate parameter names and
        unbound reads of `list_artists`/`list_genres`/`list_lyrics`, as above.
        """
        snake_case__ = [self.artists_encoder.get(UpperCamelCase__ , 0) for artist in list_artists]
        for genres in range(len(UpperCamelCase__)):
            snake_case__ = [self.genres_encoder.get(UpperCamelCase__ , 0) for genre in list_genres[genres]]
            snake_case__ = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        snake_case__ = [[self.lyrics_encoder.get(UpperCamelCase__ , 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : Optional[int]):
        """Split lyrics into a list of single characters (character-level tokenization)."""
        return list(UpperCamelCase__)

    def __magic_name__ ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , **UpperCamelCase__ : List[str]):
        """Normalize artist/genre/lyrics, then tokenize the lyrics."""
        snake_case__ , snake_case__ , snake_case__ = self.prepare_for_tokenization(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        snake_case__ = self._tokenize(UpperCamelCase__)
        return artist, genre, lyrics

    def __magic_name__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : bool = False):
        """Normalize inputs per checkpoint version (v2 vs v3) and scrub out-of-vocab characters."""
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                # v3 keeps the raw lowercased names
                snake_case__ = artists[idx].lower()
                snake_case__ = [genres[idx].lower()]
            else:
                # v2 normalizes names and tags them with a ".v2" suffix
                snake_case__ = self._normalize(artists[idx]) + """.v2"""
                snake_case__ = [
                    self._normalize(UpperCamelCase__) + """.v2""" for genre in genres[idx].split("""_""")
                ] # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            # v2 lyric vocabulary: explicit character table with its own encoder/decoder
            snake_case__ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""")
            snake_case__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
            snake_case__ = {vocab[index]: index + 1 for index in range(len(UpperCamelCase__))}
            snake_case__ = 0
            snake_case__ = len(UpperCamelCase__) + 1
            snake_case__ = self.vocab
            snake_case__ = {v: k for k, v in self.vocab.items()}
            snake_case__ = """"""
        else:
            snake_case__ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""")
        snake_case__ = self._run_strip_accents(UpperCamelCase__)
        snake_case__ = lyrics.replace("""\\""" , """\n""")
        snake_case__ = self.out_of_vocab.sub("""""" , UpperCamelCase__), [], []
        return artists, genres, lyrics

    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : str):
        """Strip accents by NFD-decomposing and dropping combining marks (category Mn)."""
        snake_case__ = unicodedata.normalize("""NFD""" , UpperCamelCase__)
        snake_case__ = []
        for char in text:
            snake_case__ = unicodedata.category(UpperCamelCase__)
            if cat == "Mn":
                continue
            output.append(UpperCamelCase__)
        return "".join(UpperCamelCase__)

    def __magic_name__ ( self : List[str] , UpperCamelCase__ : str):
        """Normalize a name to lowercase [a-zA-Z0-9.] with runs of other characters collapsed to '_'."""
        snake_case__ = (
            [chr(UpperCamelCase__) for i in range(ord("""a""") , ord("""z""") + 1)]
            + [chr(UpperCamelCase__) for i in range(ord("""A""") , ord("""Z""") + 1)]
            + [chr(UpperCamelCase__) for i in range(ord("""0""") , ord("""9""") + 1)]
            + ["""."""]
        )
        snake_case__ = frozenset(UpperCamelCase__)
        snake_case__ = re.compile(R"""_+""")
        snake_case__ = """""".join([c if c in accepted else """_""" for c in text.lower()])
        snake_case__ = pattern.sub("""_""" , UpperCamelCase__).strip("""_""")
        return text

    def __magic_name__ ( self : List[Any] , UpperCamelCase__ : List[str]):
        """Join lyric tokens back into a space-separated string."""
        return " ".join(UpperCamelCase__)

    def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : bool = False):
        """Convert `inputs` to the requested tensor framework (TF / PyTorch / JAX / NumPy)."""
        if not isinstance(UpperCamelCase__ , UpperCamelCase__):
            snake_case__ = TensorType(UpperCamelCase__)
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    """Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""")
            import tensorflow as tf

            snake_case__ = tf.constant
            snake_case__ = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""")
            import torch

            snake_case__ = torch.tensor
            snake_case__ = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""")
            import jax.numpy as jnp # noqa: F811

            snake_case__ = jnp.array
            snake_case__ = _is_jax
        else:
            snake_case__ = np.asarray
            snake_case__ = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                snake_case__ = [inputs]
            if not is_tensor(UpperCamelCase__):
                snake_case__ = as_tensor(UpperCamelCase__)
        except: # noqa E722
            raise ValueError(
                """Unable to create tensor, you should probably activate truncation and/or padding """
                """with 'padding=True' 'truncation=True' to have batched tensors with the same length.""")
        return inputs

    def __call__( self : str , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any="" , UpperCamelCase__ : Dict="pt"):
        """Tokenize one (artist, genres, lyrics) triple and return a BatchEncoding."""
        snake_case__ = [0, 0, 0]
        snake_case__ = [artist] * len(self.version)
        snake_case__ = [genres] * len(self.version)
        snake_case__ , snake_case__ , snake_case__ = self.tokenize(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        snake_case__ , snake_case__ , snake_case__ = self._convert_token_to_id(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        # attention mask placeholder: -inf per lyric token
        snake_case__ = [-INFINITY] * len(full_tokens[-1])
        snake_case__ = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=UpperCamelCase__)
            for i in range(len(self.version))
        ]
        return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks})

    def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None):
        """Write the three vocabularies as JSON files into the directory; returns their paths."""
        if not os.path.isdir(UpperCamelCase__):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        snake_case__ = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""])
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.artists_encoder , ensure_ascii=UpperCamelCase__))
        snake_case__ = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""])
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.genres_encoder , ensure_ascii=UpperCamelCase__))
        snake_case__ = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""])
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.lyrics_encoder , ensure_ascii=UpperCamelCase__))
        return (artists_file, genres_file, lyrics_file)

    def __magic_name__ ( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str]):
        """Map artist/genre/lyric ids back to their string tokens via the decoder maps."""
        snake_case__ = self.artists_decoder.get(UpperCamelCase__)
        snake_case__ = [self.genres_decoder.get(UpperCamelCase__) for genre in genres_index]
        snake_case__ = [self.lyrics_decoder.get(UpperCamelCase__) for character in lyric_index]
        return artist, genres, lyrics
| 654 | 0 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
# Marks every test in this module as an integration test (requires network access).
_UpperCAmelCase : List[str] = pytest.mark.integration


@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def _SCREAMING_SNAKE_CASE ( __snake_case : List[Any] , __snake_case : Optional[Any] ):
    """Smoke-test `inspect_dataset`: the loading script is copied into the target dir.

    NOTE(review): `__snake_case` is declared twice (a SyntaxError) and the body
    reads undefined `path`/`script_name`; the originals were presumably `path`
    and the `tmp_path` fixture. Confirm upstream.
    """
    inspect_dataset(__snake_case , __snake_case )
    _A = path + '.py'
    assert script_name in os.listdir(__snake_case )
    assert "__pycache__" not in os.listdir(__snake_case )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def _SCREAMING_SNAKE_CASE ( __snake_case : Dict , __snake_case : Dict ):
    """Smoke-test the deprecated `inspect_metric`: the metric script is copied into the target dir.

    NOTE(review): `__snake_case` is declared twice (a SyntaxError) and the body
    reads undefined `path`/`script_name`; confirm original parameter names upstream.
    """
    inspect_metric(__snake_case , __snake_case )
    _A = path + '.py'
    assert script_name in os.listdir(__snake_case )
    assert "__pycache__" not in os.listdir(__snake_case )
@pytest.mark.parametrize(
    'path, config_name, expected_splits' , [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] , )
def _SCREAMING_SNAKE_CASE ( __snake_case : Dict , __snake_case : Any , __snake_case : Union[str, Any] ):
    """`get_dataset_config_info` returns the right config name and split names.

    NOTE(review): all three parameters are named `__snake_case` (a SyntaxError) and
    the body reads undefined `info`/`config_name`/`expected_splits`; originals were
    presumably `path`, `config_name`, `expected_splits`.
    """
    _A = get_dataset_config_info(__snake_case , config_name=__snake_case )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception' , [
        ('paws', None, ValueError),
    ] , )
def _SCREAMING_SNAKE_CASE ( __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : List[str] ):
    """`get_dataset_config_info` raises when no config is given for a multi-config dataset.

    NOTE(review): all three parameters share the name `__snake_case` (a SyntaxError);
    originals were presumably `path`, `config_name`, `expected_exception`.
    """
    with pytest.raises(__snake_case ):
        get_dataset_config_info(__snake_case , config_name=__snake_case )
@pytest.mark.parametrize(
    'path, expected' , [
        ('squad', 'plain_text'),
        ('acronym_identification', 'default'),
        ('lhoestq/squad', 'plain_text'),
        ('lhoestq/test', 'default'),
        ('lhoestq/demo1', 'lhoestq--demo1'),
        ('dalle-mini/wit', 'dalle-mini--wit'),
    ] , )
def _SCREAMING_SNAKE_CASE ( __snake_case : int , __snake_case : Optional[Any] ):
    """`get_dataset_config_names` includes the expected config for each dataset.

    NOTE(review): duplicate `__snake_case` parameters (a SyntaxError) and the body
    reads undefined `expected`/`config_names`; originals were presumably `path`
    and `expected`.
    """
    _A = get_dataset_config_names(__snake_case )
    assert expected in config_names
@pytest.mark.parametrize(
    'path, expected_configs, expected_splits_in_first_config' , [
        ('squad', ['plain_text'], ['train', 'validation']),
        ('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
        ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
    ] , )
def _SCREAMING_SNAKE_CASE ( __snake_case : Any , __snake_case : Optional[Any] , __snake_case : Optional[Any] ):
    """`get_dataset_infos` lists all configs, and the first config has the expected splits.

    NOTE(review): all three parameters share the name `__snake_case` (a SyntaxError)
    and the body reads undefined `infos`/`expected_configs`/`expected_config`/`info`.
    """
    _A = get_dataset_infos(__snake_case )
    assert list(infos.keys() ) == expected_configs
    _A = expected_configs[0]
    assert expected_config in infos
    _A = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
    'path, expected_config, expected_splits' , [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] , )
def _SCREAMING_SNAKE_CASE ( __snake_case : str , __snake_case : Optional[int] , __snake_case : List[Any] ):
    """`get_dataset_infos` contains the expected config with the expected splits.

    NOTE(review): all three parameters share the name `__snake_case` (a SyntaxError)
    and the body reads undefined `infos`/`expected_config`/`info`/`expected_splits`.
    """
    _A = get_dataset_infos(__snake_case )
    assert expected_config in infos
    _A = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception' , [
        ('paws', None, ValueError),
    ] , )
def _SCREAMING_SNAKE_CASE ( __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : Any ):
    """`get_dataset_split_names` raises when no config is specified for a multi-config dataset.

    NOTE(review): all three parameters share the name `__snake_case` (a SyntaxError);
    originals were presumably `path`, `config_name`, `expected_exception`.
    """
    with pytest.raises(__snake_case ):
        get_dataset_split_names(__snake_case , config_name=__snake_case )
| 107 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class _lowerCAmelCase :
    """Resize images so their shortest edge hits a sampled target, capping the longest edge.

    NOTE(review): identifiers look machine-mangled — locals are bound to
    `snake_case__` while `h`, `w`, `size`, `scale`, `newh`, `neww`, `imgs`,
    `pil_image` and `img_augs` are read without being bound, the constructor
    declares `UpperCamelCase__` twice (a SyntaxError), and `np.uinta` is
    presumably `np.uint8`. Confirm against the upstream `ResizeShortestEdge`
    before use.
    """

    def __init__( self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str]=sys.maxsize):
        """Store the interpolation mode, short-edge sampling range and long-edge cap."""
        snake_case__ = """bilinear"""
        snake_case__ = max_size
        snake_case__ = short_edge_length

    def __call__( self : List[str] , UpperCamelCase__ : Tuple):
        """Resize each image in the batch; uint8 arrays go through PIL, tensors through torch interpolate."""
        snake_case__ = []
        for img in imgs:
            snake_case__ , snake_case__ = img.shape[:2]
            # later: provide list and randomly choose index for resize
            snake_case__ = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1)
            if size == 0:
                return img
            # scale so the shortest edge equals the sampled size
            snake_case__ = size * 1.0 / min(UpperCamelCase__ , UpperCamelCase__)
            if h < w:
                snake_case__ , snake_case__ = size, scale * w
            else:
                snake_case__ , snake_case__ = scale * h, size
            # shrink further if the long edge would exceed max_size
            if max(UpperCamelCase__ , UpperCamelCase__) > self.max_size:
                snake_case__ = self.max_size * 1.0 / max(UpperCamelCase__ , UpperCamelCase__)
                snake_case__ = newh * scale
                snake_case__ = neww * scale
            snake_case__ = int(neww + 0.5)
            snake_case__ = int(newh + 0.5)
            if img.dtype == np.uinta:
                # uint8 ndarray: resize via PIL
                snake_case__ = Image.fromarray(UpperCamelCase__)
                snake_case__ = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR)
                snake_case__ = np.asarray(UpperCamelCase__)
            else:
                # float tensor: resize via torch interpolate
                snake_case__ = img.permute(2 , 0 , 1).unsqueeze(0) # 3, 0, 1) # hw(c) -> nchw
                snake_case__ = nn.functional.interpolate(
                    UpperCamelCase__ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase__).squeeze(0)
            img_augs.append(UpperCamelCase__)
        return img_augs
class _lowerCAmelCase :
    """FRCNN-style preprocessing: move to device, resize, normalize, and pad a batch of images.

    NOTE(review): mangled identifiers as elsewhere in this file — locals are
    bound to `snake_case__` while `cfg`, `images`, `sizes`, `scales_yx`,
    `single_image` and `x` are read without being bound, and `__call__`
    declares `UpperCamelCase__` twice (a SyntaxError).
    """

    def __init__( self : Dict , UpperCamelCase__ : Optional[int]):
        """Read resize / normalization / padding settings from a detectron-style cfg object."""
        snake_case__ = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST)
        snake_case__ = cfg.INPUT.FORMAT
        snake_case__ = cfg.SIZE_DIVISIBILITY
        snake_case__ = cfg.PAD_VALUE
        snake_case__ = cfg.INPUT.MAX_SIZE_TEST
        snake_case__ = cfg.MODEL.DEVICE
        snake_case__ = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        snake_case__ = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        # standardize pixels: (x - mean) / std
        snake_case__ = lambda UpperCamelCase__: (x - self.pixel_mean) / self.pixel_std

    def __magic_name__ ( self : Dict , UpperCamelCase__ : Dict):
        """Zero-pad all images to the largest spatial size in the batch; returns (batch, sizes)."""
        snake_case__ = tuple(max(UpperCamelCase__) for s in zip(*[img.shape for img in images]))
        snake_case__ = [im.shape[-2:] for im in images]
        snake_case__ = [
            nn.functional.pad(
                UpperCamelCase__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
            for size, im in zip(UpperCamelCase__ , UpperCamelCase__)
        ]
        return torch.stack(UpperCamelCase__), torch.tensor(UpperCamelCase__)

    def __call__( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : str=False):
        """Full pipeline: to-device, resize shortest edge, normalize, pad; returns (images, sizes, scales)."""
        with torch.no_grad():
            if not isinstance(UpperCamelCase__ , UpperCamelCase__):
                snake_case__ = [images]
            if single_image:
                assert len(UpperCamelCase__) == 1
            for i in range(len(UpperCamelCase__)):
                if isinstance(images[i] , torch.Tensor):
                    images.insert(UpperCamelCase__ , images.pop(UpperCamelCase__).to(self.device).float())
                elif not isinstance(images[i] , torch.Tensor):
                    images.insert(
                        UpperCamelCase__ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase__) , input_format=self.input_format))
                        .to(self.device)
                        .float() , )
            # resize smallest edge
            snake_case__ = torch.tensor([im.shape[:2] for im in images])
            snake_case__ = self.aug(UpperCamelCase__)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            snake_case__ = [self.normalizer(UpperCamelCase__) for x in images]
            # now pad them to do the following operations
            snake_case__ , snake_case__ = self.pad(UpperCamelCase__)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            snake_case__ = torch.true_divide(UpperCamelCase__ , UpperCamelCase__)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _UpperCAmelCase ( boxes , scale_yx ):
    """Rescale xyxy boxes in place by per-image (y, x) scale factors.

    Fix for mangled original: the signature declared the name `a` twice (a
    SyntaxError) while the body read undefined `boxes`/`scale_yx`; the
    parameters are restored to the names the body uses.

    Args:
        boxes: (N, 4) tensor of [x0, y0, x1, y1] boxes; modified in place.
        scale_yx: (N, 2) tensor of [scale_y, scale_x] factors.

    Returns:
        The same (mutated) `boxes` tensor.
    """
    boxes[:, 0::2] *= scale_yx[:, 1]  # x coordinates scale by the x factor
    boxes[:, 1::2] *= scale_yx[:, 0]  # y coordinates scale by the y factor
    return boxes
def _UpperCAmelCase ( tensor , box_size ):
    """Clamp xyxy box coordinates in place so they lie inside an (h, w) image.

    Fix for mangled original: the signature declared the name `a` twice (a
    SyntaxError), `box_size` was read without being bound, and the clamp upper
    bounds pointed at the mangled parameter instead of the image extents.

    Args:
        tensor: (N, 4) tensor of [x0, y0, x1, y1] boxes; modified in place.
        box_size: (height, width) of the image.
    """
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)  # x0
    tensor[:, 1].clamp_(min=0, max=h)  # y0
    tensor[:, 2].clamp_(min=0, max=w)  # x1
    tensor[:, 3].clamp_(min=0, max=h)  # y1
| 654 | 0 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
# Regex collapsing any whitespace run; used to make content hashing whitespace-insensitive.
__a: Optional[Any] = re.compile(R'''\s+''')


def _SCREAMING_SNAKE_CASE ( __snake_case ) -> dict:
    """Return `{"hash": ...}` — a whitespace-insensitive content hash for one example.

    NOTE(review): mangled — `hashlib.mda` is presumably `hashlib.md5`, the
    example itself is passed as the regex pattern, and `example` is read without
    being bound; the original presumably hashed
    `re.sub(PATTERN, "", example["content"])`. Confirm against the upstream
    codeparrot preprocessing script.
    """
    return {"hash": hashlib.mda(re.sub(__snake_case , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> dict:
    """Return per-example line-length statistics for the "content" field.

    Fix for mangled original: the body measured `len` of the whole argument for
    every line and read the content from an undefined `example`; it now measures
    each line of the parameter's content.

    Args:
        __snake_case: example dict with a "content" string field.

    Returns:
        ``{"line_mean": ..., "line_max": ...}``.
    """
    line_lengths = [len(line) for line in __snake_case["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> dict:
    """Return the fraction of alphanumeric characters in the example's content.

    Fix for mangled original: the computed mean was bound to a throwaway name
    while the return read an undefined `alpha_frac`, and the content was read
    from an undefined `example`.

    Args:
        __snake_case: example dict with a "content" string field.

    Returns:
        ``{"alpha_frac": ...}`` with the mean of per-character `isalnum()`.
    """
    alpha_frac = np.mean([c.isalnum() for c in __snake_case["content"]])
    return {"alpha_frac": alpha_frac}
def _SCREAMING_SNAKE_CASE ( example , uniques ) -> bool:
    """Return True (and consume the hash) iff the example's hash is still unique.

    Mutates `uniques` by removing the hash the first time it is seen, so any
    later example with the same hash returns False.

    Fix for mangled original: the signature declared `__snake_case` twice (a
    SyntaxError) while the body read the undefined names `example`/`uniques`.

    Args:
        example: example dict with a "hash" field.
        uniques: mutable set of hashes not yet emitted.
    """
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False
def _SCREAMING_SNAKE_CASE ( example , scan_width=5 ):
    """Flag examples whose first `scan_width` lines contain an auto-generation marker.

    Fix for mangled original: the signature declared `__snake_case` twice (a
    SyntaxError) and the body read the undefined name `example`.

    Args:
        example: example dict with a "content" string field.
        scan_width: number of leading lines to scan for markers.

    Returns:
        ``{"autogenerated": bool}``.
    """
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    # the original used a for/else here; the loop never breaks, so this is equivalent
    return {"autogenerated": False}
def _SCREAMING_SNAKE_CASE ( example , scan_width=5 , coeff=0.05 ):
    """Flag probable config/test files.

    Two signals: an explicit keyword in the first `scan_width` lines, or
    "config"/"test" occurring more than ``coeff * number-of-lines`` times
    across the whole file.

    Fix for mangled original: three parameters all named `__snake_case`
    (a SyntaxError) and an undefined `example` read in the body.

    Args:
        example: example dict with a "content" string field.
        scan_width: number of leading lines to scan for explicit markers.
        coeff: keyword-density threshold as a fraction of the line count.

    Returns:
        ``{"config_or_test": bool}``.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test: explicit markers near the top of the file
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test: keyword density over the whole file
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
    if count_config > threshold or count_test > threshold:
        return {"config_or_test": True}
    return {"config_or_test": False}
def _SCREAMING_SNAKE_CASE ( __snake_case ):
    """Flag examples whose content contains none of the Python structure keywords.

    Fix for mangled original: the body read the content from an undefined
    `example` instead of the parameter.

    Args:
        __snake_case: example dict with a "content" string field.

    Returns:
        ``{"has_no_keywords": bool}`` — True when no "def "/"class "/"for "/
        "while " appears on any (lowercased) line.
    """
    keywords = ["def ", "class ", "for ", "while "]
    lines = __snake_case["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def _SCREAMING_SNAKE_CASE ( example , minimum=4 ):
    """Flag examples with at most `minimum` '=' characters in their content.

    Fix for mangled original: the signature declared `__snake_case` twice (a
    SyntaxError) and the body read the undefined name `example`.

    Args:
        example: example dict with a "content" string field.
        minimum: maximum number of '=' characters still counted as "few".

    Returns:
        ``{"has_few_assignments": bool}``.
    """
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
    if counter > minimum:
        return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> List[str]:
    """Return `{"ratio": ...}` — character count over token count for the example's content.

    NOTE(review): mangled — the body reads undefined `example`/`tokenizer`/`ratio`
    (the tokenizer is a module global bound later under the mangled name `__a`),
    and passes the example as `truncation=`; the original presumably tokenized
    `example["content"]` with `truncation=True`. Confirm upstream.
    """
    _UpperCAmelCase = tokenizer(example["""content"""] , truncation=__snake_case )["""input_ids"""]
    _UpperCAmelCase = len(example["""content"""] ) / len(__snake_case )
    return {"ratio": ratio}
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> Optional[Any]:
    """Aggregate all per-example statistics into a single dict (for `datasets.map`).

    NOTE(review): mangled — the accumulator is bound to `_UpperCAmelCase` but
    updated/returned as `results`, and the helpers are called under their
    original names (get_hash, line_stats, ...) which do not exist in this
    obfuscated module (every def above is named `_SCREAMING_SNAKE_CASE`).
    """
    _UpperCAmelCase = {}
    results.update(get_hash(__snake_case ) )
    results.update(line_stats(__snake_case ) )
    results.update(alpha_stats(__snake_case ) )
    results.update(char_token_ratio(__snake_case ) )
    results.update(is_autogenerated(__snake_case ) )
    results.update(is_config_or_test(__snake_case ) )
    results.update(has_no_keywords(__snake_case ) )
    results.update(has_few_assignments(__snake_case ) )
    return results
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case ) -> int:
    """Combined keep/drop decision for one example (for `datasets.filter`).

    Drops duplicates, autogenerated files, long/dense lines, low-alpha or
    low-token-ratio files, and (probabilistically) config/test and
    keyword-free files; keeps everything else.

    NOTE(review): mangled — the signature declares `__snake_case` three times (a
    SyntaxError; originally `example`, `uniques`, `args`), the body reads the
    undefined names `example`/`args`, and `check_uniques` is the original name
    of an obfuscated sibling above. This def also shadows the `filter` builtin
    read at the call site below. Confirm upstream.
    """
    if not check_uniques(__snake_case , __snake_case ):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def _SCREAMING_SNAKE_CASE ( __snake_case ):
    """Gzip-compress the file at the given path to `<path>.gz` and delete the original.

    Fix for mangled original: `shutil.copyfileobj` was called with the path
    twice instead of the open source/destination file objects, so the copy
    could never succeed.

    Args:
        __snake_case: path of the file to compress (removed after compression).
    """
    with open(__snake_case, "rb") as f_in:
        with gzip.open(str(__snake_case) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(__snake_case)
# NOTE(review): this script tail is machine-mangled — every module-level binding
# targets `__a` while the code reads the original names (`parser`, `args`,
# `tokenizer`, `ds`, `uniques`, `frac`, `ds_filter`, `duplicate_clusters`,
# `output_dir`, `data_dir`, `file_path`, `end_index`). It also runs at import
# time (no `if __name__ == "__main__":` guard) and passes the obfuscated def
# shadowing the `filter` builtin to `ds.filter`. Confirm against the upstream
# codeparrot preprocessing script.

# Settings
__a: Optional[int] = HfArgumentParser(PreprocessingArguments)
__a: List[Any] = parser.parse_args()
if args.num_workers is None:
    # default to one worker per CPU core
    __a: Tuple = multiprocessing.cpu_count()
__a: Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
__a: Optional[Any] = time.time()
__a: Dict = load_dataset(args.dataset_name, split='''train''')
print(F"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
__a: Dict = time.time()
__a: str = ds.map(preprocess, num_proc=args.num_workers)
print(F"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
__a: int = set(ds.unique('''hash'''))
__a: Tuple = len(uniques) / len(ds)
print(F"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
__a: Optional[Any] = time.time()
__a: Optional[Any] = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F"Time to filter dataset: {time.time()-t_start:.2f}")
print(F"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    __a: Optional[int] = time.time()
    __a , __a: str = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(F"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
__a: int = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
        json.dump(duplicate_clusters, f)

__a: Optional[Any] = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)

__a: List[Any] = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    __a: Optional[int] = str(data_dir / F"file-{file_number+1:012}.json")
    __a: List[str] = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(F"Time to save dataset: {time.time()-t_start:.2f}")
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _lowerCAmelCase ( lowercase_ ):
    """Configuration class for a WavLM model.

    NOTE(review): the original block declared every ``__init__`` parameter with
    the same duplicated name (a SyntaxError) and assigned every value to one
    throwaway local instead of ``self``.  Parameter names below are
    reconstructed from the right-hand sides of those assignments; defaults are
    kept in the original order.
    """

    # Key used by PretrainedConfig for (de)serialization — presumably
    # `model_type`; the original attribute name was obfuscated.
    model_type = '''wavlm'''

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        # One feature-extractor layer per conv dim entry.
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                f''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """Overall stride of the conv feature extractor (product of conv strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 654 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __a ( metaclass=DummyObject ):
    """Placeholder object raising a helpful error when the `onnx` backend is missing.

    Fixes vs. original block: the metaclass referenced an undefined name
    (`_snake_case`) instead of the `DummyObject` imported just above, and both
    classmethods shared one name so the second silently overwrote the first.
    Names `_backends` / `from_config` / `from_pretrained` follow the dummy-object
    convention DummyObject expects — TODO confirm against the project's utils.
    """

    # Backends required before the real class can be used.
    _backends = ["""onnx"""]

    def __init__( self , *args , **kwargs ):
        requires_backends(self ,["""onnx"""] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls ,["""onnx"""] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls ,["""onnx"""] )
| 109 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( lowercase_ ):
    """Unconditional image-generation pipeline using the score-based SDE-VE method.

    NOTE(review): parameter and local names are reconstructed from later reads
    (`batch_size`, `img_size`, `shape`, `model`, ...) — the original block had
    duplicate parameter names and one shared throwaway local.
    """

    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2_0_0_0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        """Run the reverse SDE sampling loop and return generated images."""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        # Start from pure noise scaled by the scheduler's initial sigma.
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step (Langevin dynamics)
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # Use the mean of the final step, clamped to valid image range.
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
| 654 | 0 |
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
# NOTE(review): the original block assigned all four values to one duplicated
# name; the attribute/sys.modules targets below are reconstructed so unpickling
# of legacy `data_utils`/`vocabulary` pickles resolves — TODO confirm.
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    """Convert a TransfoXL TF checkpoint and/or pre-processed corpus to PyTorch files.

    Renamed from the obfuscated `UpperCAmelCase` to the name the `__main__`
    block actually calls; parameter order matches that call site.

    Args:
        tf_checkpoint_path: optional TF checkpoint to convert ("" to skip).
        transfo_xl_config_file: optional config json ("" -> default config).
        pytorch_dump_folder_path: where the converted files are written.
        transfo_xl_dataset_file: optional pickled corpus to convert ("" to skip).
    """
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, '''rb''' ) as fp:
            corpus = pickle.load(fp, encoding='''latin1''' )

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
        print(f'Save vocabulary to {pytorch_vocab_dump_path}' )
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path )

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('''vocab''', None )
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '''/''' + CORPUS_NAME
        print(f'Save dataset to {pytorch_dataset_dump_path}' )
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path )

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file )
        tf_path = os.path.abspath(tf_checkpoint_path )
        print(f'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )

        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file )
        print(f'Building PyTorch model from configuration: {config}' )
        model = TransfoXLLMHeadModel(config )
        model = load_tf_weights_in_transfo_xl(model, config, tf_path )

        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME )
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME )
        print(f'Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}' )
        torch.save(model.state_dict(), pytorch_weights_dump_path )
        print(f'Save configuration file to {os.path.abspath(pytorch_config_dump_path )}' )
        with open(pytorch_config_dump_path, '''w''', encoding='''utf-8''' ) as f:
            f.write(config.to_json_string() )


# Backward-compatible alias for the previous (obfuscated) public name.
UpperCAmelCase = convert_transfo_xl_checkpoint_to_pytorch
if __name__ == "__main__":
    # CLI entry point: the original assigned both the parser and the parsed
    # args to one duplicated name while reading `parser`/`args` below.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=str,
        required=True,
        help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
    )
    parser.add_argument(
        '''--tf_checkpoint_path''',
        default='''''',
        type=str,
        help='''An optional path to a TensorFlow checkpoint path to be converted.''',
    )
    parser.add_argument(
        '''--transfo_xl_config_file''',
        default='''''',
        type=str,
        help=(
            '''An optional config json file corresponding to the pre-trained BERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--transfo_xl_dataset_file''',
        default='''''',
        type=str,
        help='''An optional dataset file to be converted in a vocabulary.''',
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
| 227 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
    """Fast tests for IFInpaintingSuperResolutionPipeline.

    NOTE(review): class attributes and method names are reconstructed to the
    tester-mixin protocol (`pipeline_class`, `params`, `get_dummy_inputs`, ...);
    the original block declared them all under duplicated obfuscated names.
    """

    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}

    def get_dummy_components(self):
        """Reuse the shared superresolution dummy components."""
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic dummy inputs (low-res image, original image, mask)."""
        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed)).to(device)
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """original_image""": original_image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""")
    def test_save_load_float16(self):
        # Relax tolerance for fp16.
        super().test_save_load_float16(expected_max_diff=1E-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
| 654 | 0 |
"""simple docstring"""
def UpperCAmelCase__ ( number: int ) -> int:
    """Return the ``number``-th Catalan number (1-indexed: 1, 1, 2, 5, 14, ...).

    Fixes vs. original block: the parameter was obfuscated so `isinstance(x, x)`
    was called (always a TypeError) and the f-strings read an undefined name.

    Raises:
        TypeError: if ``number`` is not an integer.
        ValueError: if ``number`` is smaller than 1.
    """
    if not isinstance(number , int ):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg )
    if number < 1:
        msg = f'Input value of [number={number}] must be > 0'
        raise ValueError(msg )
    # Iteratively apply C(i+1) = C(i) * (4i - 2) / (i + 1), staying in integers.
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 359 |
# Digit pools used by `reversible_numbers` below (Project Euler 145).
# The original assigned both lists to one duplicated name; the functions
# below read them as EVEN_DIGITS / ODD_DIGITS.
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count reversible numbers of ``length`` digits (Project Euler 145 helper).

    Renamed from the obfuscated `_UpperCAmelCase`: the recursion and `solution`
    both call `reversible_numbers`, which was otherwise undefined.  Digit
    positions written per level are reconstructed from the symmetric
    outside-in fill pattern — the two digits fixed at each level sit at
    mirrored positions.

    Args:
        remaining_length: digits still to assign (filled outside-in, 2 at a time).
        remainder: carry accumulated from already-fixed digit pairs.
        digits: scratch buffer of the candidate number's digits.
        length: total number of digits of the candidate.
    """
    if remaining_length == 0:
        # Leading/trailing zero would change the digit count of n or reverse(n).
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        # Verify every digit of n + reverse(n) is odd, propagating carries.
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Odd length: the middle digit is added to itself, so the carry must be odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        # The mirrored digit must have opposite parity so the pair sum is odd.
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length,
            )
    return result
def solution(max_power: int = 9) -> int:
    """Return how many reversible numbers exist below 10**``max_power``.

    Renamed from the obfuscated `_UpperCAmelCase`: the `__main__` guard calls
    `solution`, which was otherwise undefined.
    """
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
    # Print the Project Euler 145 answer (requires `solution` defined above).
    print(F'''{solution() = }''')
| 654 | 0 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput ( lowercase_ ):
    """Output of the SDE-VE scheduler's step functions.

    NOTE(review): renamed from the obfuscated duplicate class name —
    `step_pred` below instantiates `SdeVeOutput(prev_sample=..., prev_sample_mean=...)`,
    which was otherwise undefined.  Field annotations reconstructed
    (originals were replaced by `= 42`).
    """

    # Sample of the previous timestep; mean of that sample (less noisy).
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor
class UpperCAmelCase ( lowercase_ , lowercase_ ):
    """Variance-exploding (VE) stochastic differential equation scheduler.

    NOTE(review): parameter, attribute and method names are reconstructed —
    the original block had duplicate parameter names (a SyntaxError) and one
    shared throwaway local.  Method names (`set_timesteps`, `set_sigmas`,
    `step_pred`, `step_correct`) are grounded by the pipeline in this file
    that calls them.
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps=2000,
        snr=0.15,
        sigma_min=0.01,
        sigma_max=1348.0,
        sampling_eps=1E-5,
        correct_steps=1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample, timestep=None):
        """No-op scaling; VE-SDE samples are fed to the model unscaled."""
        return sample

    def set_timesteps(self, num_inference_steps, sampling_eps=None, device=None):
        """Set the continuous timesteps, linearly spaced from 1 down to eps."""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
        """Set the (geometric) noise scales used by the sampling loop."""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        """Return the previous discrete sigma for each timestep (0 at index 0)."""
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )

    def step_pred(self, model_output, timestep, sample, generator=None, return_dict=True):
        """Predictor step: one reverse-SDE update of the sample."""
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(self, model_output, sample, generator=None, return_dict=True):
        """Corrector step: one Langevin-dynamics update of the sample."""
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples, noise, timesteps):
        """Forward-diffuse `original_samples` to the noise level at `timesteps`."""
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 42 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
a__ = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class _lowerCAmelCase ( lowercase_ ):
    """Translation tool backed by facebook/nllb-200-distilled-600M.

    NOTE(review): class-attribute and method names are reconstructed to the
    PipelineTool protocol — the original block declared every attribute under
    one duplicated name (so `self.lang_to_code`, read in `encode`, was never
    defined) and gave all three methods the same name.
    """

    default_checkpoint = '''facebook/nllb-200-distilled-600M'''
    description = (
        '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
        '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
        '''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '''
        '''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
    )
    name = '''translator'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    # Maps plain-English language names to NLLB language codes.
    lang_to_code = LANGUAGE_CODES
    inputs = ['''text''', '''text''', '''text''']
    outputs = ['''text''']

    def encode(self, text, src_lang, tgt_lang):
        """Validate the languages and tokenize `text` for translation."""
        if src_lang not in self.lang_to_code:
            raise ValueError(F'''{src_lang} is not a supported language.''')
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F'''{tgt_lang} is not a supported language.''')
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="""pt""", src_lang=src_lang, tgt_lang=tgt_lang)

    def forward(self, inputs):
        """Run generation on the tokenized inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode the generated token ids back to text."""
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 654 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCamelCase__ :
    """Builds a tiny Mask2Former config plus random inputs and runs shape checks.

    NOTE(review): the original chunk gave every parameter the duplicate name
    `__a` (a SyntaxError) and replaced references with the undefined
    `UpperCamelCase__`; names are restored from how the test class below uses
    this tester (`prepare_config_and_inputs_for_common`, `get_config`, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        # the mask-feature head is sized like the hidden dim in this tiny config
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        """Random pixel values/mask plus binary mask and class labels on `torch_device`."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device)
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        """Tiny Mask2Former config; attribute targets reconstructed — TODO confirm against upstream test."""
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim, )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 1_28
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        # NOTE(review): assertTrue with two args only truth-tests the first;
        # kept as in the original, but assertEqual was probably intended.
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim), )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            comm_check_on_output(result)
            result = model(pixel_values)
            comm_check_on_output(result)
        result = model(
            pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels)
        comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


MaskaFormerModelTester = lowerCamelCase__  # name the test class below instantiates
@require_torch
class lowerCamelCase__ (ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model-test harness for Mask2Former.

    NOTE(review): the original chunk named every method `_a` (so later defs
    shadowed earlier ones and unittest discovered nothing) and inherited from
    the undefined `lowercase_`. Method, flag and base names are restored from
    the ModelTesterMixin conventions — confirm against the upstream test file.
    """

    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}

    # flag names reconstructed from the mixin's configuration knobs — TODO confirm
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskaformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()
        model = MaskaFormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only the segmentation head produces a loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config)
        model.to(torch_device)
        model.train()
        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config).to(torch_device)
        model.train()
        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
a_ : float = 1E-4  # absolute tolerance used by the integration tests below (annotation fixed: value is a float, not List[str])
def __lowercase( ):
    """Load the COCO fixture image (two cats on a couch) used by the slow tests.

    Fixes the original body, which assigned the image to one name and
    returned the undefined name `image`.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


prepare_img = __lowercase  # name the integration tests below actually call
@require_vision
@slow
class lowerCamelCase__ (unittest.TestCase):
    """Slow integration tests against the released Mask2Former checkpoint.

    NOTE(review): method names and the `torch_device` / tolerance (`a_`)
    references are restored from the garbled `_a` / `UpperCamelCase__` forms.
    """

    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 3_84, 3_84))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice, atol=a_))

        expected_slice = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice, atol=a_))

        expected_slice = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice, atol=a_))

    def test_inference_universal_segmentation_head(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 3_84, 3_84))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=a_))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=a_))

    def test_with_segmentation_maps_and_loss(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        # np.floataa was a garbled dtype name; the processor expects float32 maps
        inputs = image_processor(
            [np.zeros((3, 8_00, 13_33)), np.zeros((3, 8_00, 13_33))],
            segmentation_maps=[np.zeros((3_84, 3_84)).astype(np.float32), np.zeros((3_84, 3_84)).astype(np.float32)],
            return_tensors="pt", )
        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _UpperCAmelCase(model):
    """Return a representative Linear layer from `model`'s first transformer block.

    Fixes the original signature, whose parameter was named `a` while the
    body read the undefined name `model`; also restores BLOOM's real
    `dense_4h_to_h` attribute (garbled to `dense_ah_to_h`).
    """
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h


get_some_linear_layer = _UpperCAmelCase  # name the tests below actually call
if is_torch_available():
import torch
import torch.nn as nn
class _lowerCAmelCase ( nn.Module ):
    """Minimal LoRA adapter: wraps a module and adds a low-rank residual path.

    Fixes the original, whose `__init__` declared two parameters both named
    `UpperCamelCase__` (a SyntaxError) and whose forward pass was named
    `__magic_name__`, so nn.Module dispatch could never reach it.
    """

    def __init__(self, module, rank):
        super().__init__()
        self.module = module
        # two bias-free linears: in_features -> rank -> out_features
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False), )
        # small init for the A matrix; B starts at zero so the adapter is a no-op
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        """Wrapped module output plus the low-rank adapter output."""
        return self.module(input, *args, **kwargs) + self.adapter(input)


LoRALayer = _lowerCAmelCase  # name the training test below actually uses
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCAmelCase ( unittest.TestCase ):
    """Shared fixtures for the 4-bit bitsandbytes tests.

    NOTE(review): the original chunk renamed every attribute to `_lowercase`
    while line-level code still read `EXPECTED_OUTPUTS` (a NameError at class
    creation); attribute names are restored from how the subclasses read them
    (`self.model_name`, `self.EXPECTED_RELATIVE_DIFFERENCE`, ...).
    """

    model_name = '''bigscience/bloom-1b7'''

    # Constant values
    # expected fp16 / 4-bit memory-footprint ratio
    EXPECTED_RELATIVE_DIFFERENCE = 2.109_6595_5269_2574

    input_text = '''Hello my name is'''
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''')
    EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''')
    EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''')
    MAX_NEW_TOKENS = 10

    def setUp(self):
        """Load the tokenizer shared by all subclasses."""
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class _lowerCAmelCase ( lowercase_ ):
    """End-to-end checks for 4-bit loading, generation, serialization and dtype guards.

    NOTE(review): every method was named `__magic_name__` (later defs shadowed
    earlier ones); names restored to the usual bnb test-suite names — confirm
    the exact test names and expected exception types against upstream.
    """

    def setUp(self):
        super().setUp()
        # Models and tokenizer (fp16 reference vs. 4-bit quantized)
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="""auto""")
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="""auto""")

    def tearDown(self):
        del self.model_fpaa
        del self.model_abit
        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_abit.config
        self.assertTrue(hasattr(config, """quantization_config"""))
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fpaa = self.model_fpaa.get_memory_footprint()
        mem_abit = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_abit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()
        for name, module in self.model_abit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="""pt""")
        output_sequences = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0), max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        config = BitsAndBytesConfig()
        config.load_in_4bit = True
        model_abit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=config, device_map="""auto""")
        encoded_input = self.tokenizer(self.input_text, return_tensors="""pt""")
        output_sequences = model_abit_from_config.generate(
            input_ids=encoded_input["""input_ids"""].to(0), max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        # saving a 4-bit model is unsupported — TODO confirm the exception type
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name, quantization_config=config, load_in_4bit=True, device_map="""auto""", bnb_4bit_quant_type="""nf4""", )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_abit.to("""cpu""")
        with self.assertRaises(ValueError):
            # Tries with a `dtype``
            self.model_abit.to(torch.float16)
        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.to(torch.device("""cuda:0"""))
        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.float()
        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.half()
        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="""pt""")
        self.model_fpaa = self.model_fpaa.to(torch.float32)
        _ = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0), max_new_tokens=1_0)
        # Check this does not throw an error
        _ = self.model_fpaa.to("""cpu""")
        # Check this does not throw an error
        _ = self.model_fpaa.half()
        # Check this does not throw an error
        _ = self.model_fpaa.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""", load_in_4bit=True, device_map="""auto""")
        # modules in `_keep_in_fp32_modules` must stay fp32 after 4-bit loading
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCAmelCase ( unittest.TestCase ):
    """4-bit loading/generation checks for T5, with and without fp32-kept modules."""

    @classmethod
    def setUpClass(cls):
        cls.model_name = """t5-small"""
        cls.dense_act_model_name = """google/flan-t5-small"""  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = """Translate in German: Hello, my dog is cute"""

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        # temporarily disable the fp32-kept modules list, restore at the end
        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None
        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="""auto""")
        encoded_input = self.tokenizer(self.input_text, return_tensors="""pt""").to(0)
        _ = model.generate(**encoded_input)
        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="""auto""")
        encoded_input = self.tokenizer(self.input_text, return_tensors="""pt""").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb
        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="""auto""")
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))
        encoded_input = self.tokenizer(self.input_text, return_tensors="""pt""").to(0)
        _ = model.generate(**encoded_input)
        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="""auto""")
        encoded_input = self.tokenizer(self.input_text, return_tensors="""pt""").to(0)
        _ = model.generate(**encoded_input)
class _lowerCAmelCase ( lowercase_ ):
    """Check that heads keep nn.Parameter weights while the body is quantized to 4-bit."""

    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = """bigscience/bloom-560m"""
        self.seq_to_seq_name = """t5-small"""
        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="""auto""")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="""auto""")
        # CausalLM model
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="""auto""")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="""auto""")

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        # a body layer must be quantized (real BLOOM attribute is dense_4h_to_h)
        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class _lowerCAmelCase ( lowercase_ ):
    """Run a text-generation pipeline with 4-bit model kwargs."""

    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            """text-generation""", model=self.model_name, model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.float16}, max_new_tokens=self.MAX_NEW_TOKENS, )
        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["""generated_text"""], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class _lowerCAmelCase ( lowercase_ ):
    """4-bit loading spread across two GPUs with a balanced device map."""

    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="""balanced""")
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})
        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="""pt""")
        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0), max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class _lowerCAmelCase ( lowercase_ ):
    """Train LoRA adapters on top of a frozen 4-bit OPT model and check gradients."""

    def setUp(self):
        self.model_name = """facebook/opt-350m"""
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("""bitsandbytes""")) < version.parse("""0.37.0"""):
            return
        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})
        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=1_6)
                module.k_proj = LoRALayer(module.k_proj, rank=1_6)
                module.v_proj = LoRALayer(module.v_proj, rank=1_6)
        # Step 3: dummy batch
        batch = self.tokenizer("""Test batch """, return_tensors="""pt""").to(0)
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class _lowerCAmelCase ( lowercase_ ):
    """GPT-2-XL variant of the shared 4-bit tests; only the fixtures differ.

    NOTE(review): attribute names restored to match the base fixture class
    (`model_name`, `EXPECTED_RELATIVE_DIFFERENCE`) that the inherited tests read.
    """

    model_name = '''gpt2-xl'''
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191_8548_5415_2187
| 654 | 0 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class __SCREAMING_SNAKE_CASE :
    """A binary-search-tree node holding a value and parent/left/right links.

    Fixes the original `__init__`, whose parameter was named `UpperCAmelCase__`
    while the body read the undefined name `value`.
    """

    def __init__(self, value=None):
        self.value = value
        self.parent = None  # Added in order to delete a node easier
        self.left = None
        self.right = None

    def __repr__(self):
        from pprint import pformat

        # leaves render as their bare value; inner nodes as a nested mapping
        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({F'''{self.value}''': (self.left, self.right)}, indent=1)


Node = __SCREAMING_SNAKE_CASE  # name the tree methods below actually call
class __SCREAMING_SNAKE_CASE :
def __init__( self : int , UpperCAmelCase__ : Node | None = None ):
'''simple docstring'''
lowercase : str =root
def __str__( self : Any ):
'''simple docstring'''
return str(self.root )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Node , UpperCAmelCase__ : Node | None ):
'''simple docstring'''
if new_children is not None: # reset its kids
lowercase : Optional[int] =node.parent
if node.parent is not None: # reset its parent
if self.is_right(UpperCamelCase__ ): # If it is the right children
lowercase : Optional[int] =new_children
else:
lowercase : Tuple =new_children
else:
lowercase : int =new_children
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Node ):
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.root is None
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : int =Node(UpperCamelCase__ ) # create a new Node
if self.empty(): # if Tree is empty
lowercase : Tuple =new_node # set its root
else: # Tree is not empty
lowercase : str =self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
lowercase : Dict =new_node # We insert the new node in a leaf
break
else:
lowercase : Tuple =parent_node.left
else:
if parent_node.right is None:
lowercase : Union[str, Any] =new_node
break
else:
lowercase : Union[str, Any] =parent_node.right
lowercase : Any =parent_node
def lowerCamelCase_ ( self : Optional[int] , *UpperCAmelCase__ : Tuple ):
'''simple docstring'''
for value in values:
self.__insert(UpperCamelCase__ )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : str ):
'''simple docstring'''
if self.empty():
raise IndexError('''Warning: Tree is empty! please use another.''' )
else:
lowercase : List[Any] =self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
lowercase : Tuple =node.left if value < node.value else node.right
return node
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Node | None = None ):
'''simple docstring'''
if node is None:
if self.root is None:
return None
lowercase : List[Any] =self.root
if not self.empty():
while node.right is not None:
lowercase : Optional[int] =node.right
return node
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Node | None = None ):
'''simple docstring'''
if node is None:
lowercase : List[str] =self.root
if self.root is None:
return None
if not self.empty():
lowercase : Tuple =self.root
while node.left is not None:
lowercase : Any =node.left
return node
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : List[Any] =self.search(UpperCamelCase__ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(UpperCamelCase__ , UpperCamelCase__ )
elif node.left is None: # Has only right children
self.__reassign_nodes(UpperCamelCase__ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(UpperCamelCase__ , node.left )
else:
lowercase : Optional[Any] =self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
lowercase : List[str] =(
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Node | None ):
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Tuple=None ):
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : list , UpperCAmelCase__ : Node | None ):
'''simple docstring'''
if node:
self.inorder(UpperCamelCase__ , node.left )
arr.append(node.value )
self.inorder(UpperCamelCase__ , node.right )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Node ):
'''simple docstring'''
lowercase : Any =[]
self.inorder(UpperCamelCase__ , UpperCamelCase__ ) # append all values to list using inorder traversal
return arr[k - 1]
def _lowerCAmelCase ( __magic_name__ : Node | None ) -> Union[str, Any]:
lowercase : int =[]
if curr_node is not None:
lowercase : Dict =postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def _lowerCAmelCase ( ) -> None:
    '''Demo: build a BST from a fixed list, query it, then drain it.

    Fixes: every ``insert``/``remove`` call passed the undefined name
    ``__magic_name__`` instead of the loop variable, the tree itself was
    printed via the same undefined name, and the return annotation used the
    unimported ``List``.
    '''
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)
    # Prints all the elements of the list in order traversal
    print(t)
    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")
    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")
    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore
    for i in testlist:
        t.remove(i)
    print(t)
if __name__ == "__main__":
    import doctest
    # Run the module's doctests verbosely when executed as a script.
    doctest.testmod(verbose=True)
| 92 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# NOTE(review): these four module constants were all clobbered to the single
# name `a__`, so only the last assignment survives. The functions below read
# OUTPUT_DIR (and presumably label/image directory constants) that are
# therefore undefined — restore the original names before running.
a__ = """"""
a__ = """"""
a__ = """"""
a__ = 1  # (0 is vertical, 1 is horizontal)
def _UpperCAmelCase ( ):
    '''Flip every dataset image plus its YOLO annotations and save the results.'''
    # NOTE(review): `a` is never defined in this scope — the call arguments
    # were clobbered by obfuscation (presumably the label/image-dir and
    # flip-type module constants). Likewise, `paths`, `file_name`,
    # `letter_code`, `file_root`, `new_annos`, `annos_list` are read below but
    # never bound: every assignment target became the throwaway
    # `snake_case__`. Confirm the intended names before running.
    snake_case__ , snake_case__ = get_dataset(a , a )
    print("""Processing...""" )
    snake_case__ , snake_case__ , snake_case__ = update_image_and_anno(a , a , a )
    for index, image in enumerate(a ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        snake_case__ = random_chars(32 )
        snake_case__ = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0]
        snake_case__ = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
        cva.imwrite(F'''/{file_root}.jpg''' , a , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(F'''Success {index+1}/{len(a )} with {file_name}''' )
        snake_case__ = []
        for anno in new_annos[index]:
            snake_case__ = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
            annos_list.append(a )
        with open(F'''/{file_root}.txt''' , """w""" ) as outfile:
            outfile.write("""\n""".join(line for line in annos_list ) )
def _UpperCAmelCase ( a : str , a : str ):
snake_case__ = []
snake_case__ = []
for label_file in glob.glob(os.path.join(a , """*.txt""" ) ):
snake_case__ = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(a ) as in_file:
snake_case__ = in_file.readlines()
snake_case__ = os.path.join(a , F'''{label_name}.jpg''' )
snake_case__ = []
for obj_list in obj_lists:
snake_case__ = obj_list.rstrip("""\n""" ).split(""" """ )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(a )
labels.append(a )
return img_paths, labels
def _UpperCAmelCase ( img_list : list , anno_list : list , flip_type : int = 1 ):
    '''Flip each image and mirror its annotations accordingly.

    ``flip_type`` follows OpenCV semantics: 1 = horizontal (mirror x-centers),
    0 = vertical (mirror y-centers). Returns (flipped images, flipped
    annotation lists, original paths) as parallel lists.

    Fixes: the original declared three parameters all named ``a`` (a
    SyntaxError) and clobbered every accumulator/loop target.
    '''
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_image = cva.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_image = cva.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_image)
    return new_imgs_list, new_annos_lists, path_list
def _UpperCAmelCase ( a : int = 32 ):
assert number_char > 1, "The number of character should greater than 1"
snake_case__ = ascii_lowercase + digits
return "".join(random.choice(a ) for _ in range(a ) )
if __name__ == "__main__":
    # NOTE(review): `main` is undefined here — the entry function above was
    # renamed to `_UpperCAmelCase` by the obfuscation; confirm the intended
    # target before running.
    main()
    print("""DONE ✅""")
| 654 | 0 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def _a (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCamelCase =[False] * len(__SCREAMING_SNAKE_CASE )
_UpperCamelCase =[-1] * len(__SCREAMING_SNAKE_CASE )
def dfs(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_UpperCamelCase =True
_UpperCamelCase =c
for u in graph[v]:
if not visited[u]:
dfs(__SCREAMING_SNAKE_CASE , 1 - c )
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
if not visited[i]:
dfs(__SCREAMING_SNAKE_CASE , 0 )
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
__lowerCamelCase : "Any" = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
# Fixes: the original printed `check_bipartite_dfs(graph)`, but the function
# was renamed to `_a` and the dict was bound to `__lowerCamelCase` — both
# names were undefined. The alias keeps the original spelling working.
graph = __lowerCamelCase
print(_a(graph))
| 404 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
a__ = 5_0_0_0_0_0  # number of synthetic examples generated for the speed test
# NOTE(review): the next two targets were clobbered to `a__`; downstream
# code reads RESULTS_BASEPATH / RESULTS_FILENAME, which are therefore
# undefined — restore the original names before running.
a__ , a__ = os.path.split(__file__)
a__ = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def _UpperCAmelCase ( dataset : "datasets.Dataset" , **kwargs ):
    '''Timed wrapper around ``Dataset.map`` (duration captured by @get_duration).

    Fixes: the original declared two parameters both named ``a`` — a
    SyntaxError — and the body referenced those unbound names.
    '''
    _ = dataset.map(**kwargs)
@get_duration
def _UpperCAmelCase ( dataset : "datasets.Dataset" , **kwargs ):
    '''Timed wrapper around ``Dataset.filter`` (duration captured by @get_duration).

    Fixes: the original declared two parameters both named ``a`` — a
    SyntaxError — and the body referenced those unbound names.
    '''
    _ = dataset.filter(**kwargs)
def _UpperCAmelCase ( ):
    '''Benchmark ``Dataset.map``/``Dataset.filter`` across output formats and
    write the timings to a JSON results file.'''
    # NOTE(review): heavily clobbered — `map`/`filter` here were originally
    # the two @get_duration-wrapped helpers above (now both renamed to
    # `_UpperCAmelCase`), so these calls currently resolve to the *builtins*
    # `map`/`filter`; `a`, `tokenizer`, and every `snake_case__` read-back
    # name are unbound as well. Confirm the upstream benchmark script before
    # fixing.
    snake_case__ = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        snake_case__ = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
        snake_case__ = generate_example_dataset(
            os.path.join(a , """dataset.arrow""" ) , a , num_examples=a )
        snake_case__ = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=a )
        def tokenize(a : "Union[str, Any]" ):
            return tokenizer(examples["""text"""] )
        snake_case__ = map(a )
        snake_case__ = map(a , batched=a )
        snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""numpy""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""pandas""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
            snake_case__ = map(a , function=lambda a : None , batched=a )
        snake_case__ = map(a , function=a , batched=a )
        snake_case__ = filter(a )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(a , """wb""" ) as f:
            f.write(json.dumps(a ).encode("""utf-8""" ) )
if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
| 654 | 0 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowerCAmelCase__ ( cp : int):
    '''Return True when code point ``cp`` lies in a CJK Unicode block.

    Fixes: the body tested ``cp`` but the parameter was named
    ``lowerCamelCase_`` — a guaranteed NameError.
    '''
    # The CJK blocks checked here include the URO, Extension A–F and the
    # compatibility-ideograph ranges.
    if (
        (cp >= 0X4E00 and cp <= 0X9FFF)
        or (cp >= 0X3400 and cp <= 0X4DBF)  #
        or (cp >= 0X20000 and cp <= 0X2A6DF)  #
        or (cp >= 0X2A700 and cp <= 0X2B73F)  #
        or (cp >= 0X2B740 and cp <= 0X2B81F)  #
        or (cp >= 0X2B820 and cp <= 0X2CEAF)  #
        or (cp >= 0XF900 and cp <= 0XFAFF)
        or (cp >= 0X2F800 and cp <= 0X2FA1F)  #
    ):  #
        return True
    return False
def lowerCAmelCase__ ( word : str):
    '''Return 1 when every char of ``word`` is a CJK character, else 0.

    Fixes: the body iterated ``word`` but the parameter was named
    ``lowerCamelCase_``, and the per-char code point was clobbered to a
    throwaway local before being tested.
    '''
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def lowerCAmelCase__ ( tokens : "List[str]"):
    '''Return the multi-char Chinese words found in ``tokens`` (deduplicated).

    Fixes: the body iterated ``tokens`` but the parameter was named
    ``lowerCamelCase_``, and the set/list targets were clobbered.
    '''
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def lowerCAmelCase__ ( bert_tokens : "List[str]" , chinese_word_set : set):
    '''Mark BERT word pieces that continue a known Chinese word with "##".

    Greedily matches the longest word from ``chinese_word_set`` starting at
    each Chinese piece and prefixes the continuation pieces in place.

    Fixes: the original declared both parameters with the same name (a
    SyntaxError) and clobbered the cursor/bookkeeping variables.
    '''
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            match_len = min(end - start, max_word_len)
            for i in range(match_len, 1, -1):  # longest match first
                whole_word = ''''''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = '''##''' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def lowerCAmelCase__ ( lines : "List[str]" , ltp_tokenizer : "LTP" , bert_tokenizer : "BertTokenizer"):
    '''Compute whole-word-mask reference positions for Chinese text.

    LTP segments each line into words, BERT splits it into word pieces; the
    result holds, per line, the indices of "##"-continuation pieces that are
    part of a multi-char Chinese word.

    Fixes: the original declared three parameters with identical names (a
    SyntaxError) and lost every intermediate variable to the obfuscation;
    names below are reconstructed from the surviving call sites.
    '''
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=['''cws''']).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res['''input_ids'''])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for token_id in input_ids:
            token = bert_tokenizer._convert_id_to_token(token_id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def lowerCAmelCase__ ( args):
    '''Read input lines, compute whole-word-mask refs, and dump one JSON list
    per line to ``args.save_path``.

    Fixes: the body read ``args`` but the parameter was named
    ``lowerCamelCase_``, and every intermediate binding was clobbered.
    '''
    with open(args.file_name, '''r''', encoding='''utf-8''') as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, '''w''', encoding='''utf-8''') as f:
        data = [json.dumps(ref) + '''\n''' for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    # CLI: paths for the input text, LTP resources, BERT tokenizer and the
    # output file, then run the main routine.
    # NOTE(review): the parser is bound to `__snake_case`, yet every
    # `parser.add_argument(...)` call and `parser.parse_args()` below read the
    # undefined name `parser`, and `main(args)` targets a function that no
    # longer exists under that name — all identifiers clobbered by
    # obfuscation; confirm the intended names before running.
    __snake_case : int =argparse.ArgumentParser(description='prepare_chinese_ref')
    parser.add_argument(
        '--file_name',
        required=False,
        type=str,
        default='./resources/chinese-demo.txt',
        help='file need process, same as training data in lm',
    )
    parser.add_argument(
        '--ltp',
        required=False,
        type=str,
        default='./resources/ltp',
        help='resources for LTP tokenizer, usually a path',
    )
    parser.add_argument(
        '--bert',
        required=False,
        type=str,
        default='./resources/robert',
        help='resources for Bert tokenizer',
    )
    parser.add_argument(
        '--save_path',
        required=False,
        type=str,
        default='./resources/ref.txt',
        help='path to save res',
    )
    __snake_case : str =parser.parse_args()
    main(args)
| 647 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def _UpperCAmelCase ( a : List[str] , a : Any=False ):
snake_case__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
snake_case__ = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def _UpperCAmelCase ( a : int , a : List[Any] , a : Union[str, Any]=False ):
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ = """"""
else:
snake_case__ = """deit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
snake_case__ = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case__ = in_proj_weight[
: config.hidden_size, :
]
snake_case__ = in_proj_bias[: config.hidden_size]
snake_case__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ = in_proj_bias[-config.hidden_size :]
def _UpperCAmelCase ( a : Dict , a : Union[str, Any] , a : int ):
snake_case__ = dct.pop(a )
snake_case__ = val
def _UpperCAmelCase ( ):
    '''Download the standard COCO cats image used to sanity-check conversions.

    Fixes: the request call passed the undefined name ``a`` for both the URL
    and the ``stream`` flag.
    '''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def _UpperCAmelCase ( a : "List[str]" , a : "Tuple" ):
    '''Convert a timm DeiT checkpoint to DeiTForImageClassificationWithTeacher,
    verify the logits agree, and save model + image processor.'''
    # NOTE(review): both parameters are named `a` (a SyntaxError) and the body
    # reads names never bound here (`deit_name`, `pytorch_dump_folder_path`,
    # `idalabel`, `timm_model`, `model`, `image_processor`, ...): every
    # assignment target became `snake_case__`. The original signature was
    # presumably (deit_name, pytorch_dump_folder_path) — confirm against the
    # upstream conversion script before fixing.
    snake_case__ = DeiTConfig()
    # all deit models have fine-tuned heads
    snake_case__ = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    snake_case__ = 1000
    snake_case__ = """huggingface/label-files"""
    snake_case__ = """imagenet-1k-id2label.json"""
    snake_case__ = json.load(open(hf_hub_download(a , a , repo_type="""dataset""" ) , """r""" ) )
    snake_case__ = {int(a ): v for k, v in idalabel.items()}
    snake_case__ = idalabel
    snake_case__ = {v: k for k, v in idalabel.items()}
    snake_case__ = int(deit_name[-6:-4] )
    snake_case__ = int(deit_name[-3:] )
    # size of the architecture
    if deit_name[9:].startswith("""tiny""" ):
        snake_case__ = 192
        snake_case__ = 768
        snake_case__ = 12
        snake_case__ = 3
    elif deit_name[9:].startswith("""small""" ):
        snake_case__ = 384
        snake_case__ = 1536
        snake_case__ = 12
        snake_case__ = 6
    if deit_name[9:].startswith("""base""" ):
        pass
    elif deit_name[4:].startswith("""large""" ):
        snake_case__ = 1024
        snake_case__ = 4096
        snake_case__ = 24
        snake_case__ = 16
    # load original model from timm
    snake_case__ = timm.create_model(a , pretrained=a )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    snake_case__ = timm_model.state_dict()
    snake_case__ = create_rename_keys(a , a )
    for src, dest in rename_keys:
        rename_key(a , a , a )
    read_in_q_k_v(a , a , a )
    # load HuggingFace model
    snake_case__ = DeiTForImageClassificationWithTeacher(a ).eval()
    model.load_state_dict(a )
    # Check outputs on an image, prepared by DeiTImageProcessor
    snake_case__ = int(
        (256 / 224) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    snake_case__ = DeiTImageProcessor(size=a , crop_size=config.image_size )
    snake_case__ = image_processor(images=prepare_img() , return_tensors="""pt""" )
    snake_case__ = encoding["""pixel_values"""]
    snake_case__ = model(a )
    snake_case__ = timm_model(a )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(a , outputs.logits , atol=1e-3 )
    Path(a ).mkdir(exist_ok=a )
    print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(a )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(a )
if __name__ == "__main__":
    # NOTE(review): the parser instance is bound to `a__`, but the
    # `parser.add_argument(...)` / `parser.parse_args()` calls below read the
    # undefined name `parser`, and `convert_deit_checkpoint` was renamed to
    # `_UpperCAmelCase` above — clobbered identifiers; confirm before use.
    a__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--deit_name""",
        default="""vit_deit_base_distilled_patch16_224""",
        type=str,
        help="""Name of the DeiT timm model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    a__ = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 654 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
# Plain-English language name -> NLLB-200 (FLORES) language code.
SCREAMING_SNAKE_CASE__ : "Optional[Any]" = {
    "Acehnese Arabic": "ace_Arab",
    "Acehnese Latin": "ace_Latn",
    "Mesopotamian Arabic": "acm_Arab",
    "Ta'izzi-Adeni Arabic": "acq_Arab",
    "Tunisian Arabic": "aeb_Arab",
    "Afrikaans": "afr_Latn",
    "South Levantine Arabic": "ajp_Arab",
    "Akan": "aka_Latn",
    "Amharic": "amh_Ethi",
    "North Levantine Arabic": "apc_Arab",
    "Modern Standard Arabic": "arb_Arab",
    "Modern Standard Arabic Romanized": "arb_Latn",
    "Najdi Arabic": "ars_Arab",
    "Moroccan Arabic": "ary_Arab",
    "Egyptian Arabic": "arz_Arab",
    "Assamese": "asm_Beng",
    "Asturian": "ast_Latn",
    "Awadhi": "awa_Deva",
    "Central Aymara": "ayr_Latn",
    "South Azerbaijani": "azb_Arab",
    "North Azerbaijani": "azj_Latn",
    "Bashkir": "bak_Cyrl",
    "Bambara": "bam_Latn",
    "Balinese": "ban_Latn",
    "Belarusian": "bel_Cyrl",
    "Bemba": "bem_Latn",
    "Bengali": "ben_Beng",
    "Bhojpuri": "bho_Deva",
    "Banjar Arabic": "bjn_Arab",
    "Banjar Latin": "bjn_Latn",
    "Standard Tibetan": "bod_Tibt",
    "Bosnian": "bos_Latn",
    "Buginese": "bug_Latn",
    "Bulgarian": "bul_Cyrl",
    "Catalan": "cat_Latn",
    "Cebuano": "ceb_Latn",
    "Czech": "ces_Latn",
    "Chokwe": "cjk_Latn",
    "Central Kurdish": "ckb_Arab",
    "Crimean Tatar": "crh_Latn",
    "Welsh": "cym_Latn",
    "Danish": "dan_Latn",
    "German": "deu_Latn",
    "Southwestern Dinka": "dik_Latn",
    "Dyula": "dyu_Latn",
    "Dzongkha": "dzo_Tibt",
    "Greek": "ell_Grek",
    "English": "eng_Latn",
    "Esperanto": "epo_Latn",
    "Estonian": "est_Latn",
    "Basque": "eus_Latn",
    "Ewe": "ewe_Latn",
    "Faroese": "fao_Latn",
    "Fijian": "fij_Latn",
    "Finnish": "fin_Latn",
    "Fon": "fon_Latn",
    "French": "fra_Latn",
    "Friulian": "fur_Latn",
    "Nigerian Fulfulde": "fuv_Latn",
    "Scottish Gaelic": "gla_Latn",
    "Irish": "gle_Latn",
    "Galician": "glg_Latn",
    "Guarani": "grn_Latn",
    "Gujarati": "guj_Gujr",
    "Haitian Creole": "hat_Latn",
    "Hausa": "hau_Latn",
    "Hebrew": "heb_Hebr",
    "Hindi": "hin_Deva",
    "Chhattisgarhi": "hne_Deva",
    "Croatian": "hrv_Latn",
    "Hungarian": "hun_Latn",
    "Armenian": "hye_Armn",
    "Igbo": "ibo_Latn",
    "Ilocano": "ilo_Latn",
    "Indonesian": "ind_Latn",
    "Icelandic": "isl_Latn",
    "Italian": "ita_Latn",
    "Javanese": "jav_Latn",
    "Japanese": "jpn_Jpan",
    "Kabyle": "kab_Latn",
    "Jingpho": "kac_Latn",
    "Kamba": "kam_Latn",
    "Kannada": "kan_Knda",
    "Kashmiri Arabic": "kas_Arab",
    "Kashmiri Devanagari": "kas_Deva",
    "Georgian": "kat_Geor",
    "Central Kanuri Arabic": "knc_Arab",
    "Central Kanuri Latin": "knc_Latn",
    "Kazakh": "kaz_Cyrl",
    "Kabiyè": "kbp_Latn",
    "Kabuverdianu": "kea_Latn",
    "Khmer": "khm_Khmr",
    "Kikuyu": "kik_Latn",
    "Kinyarwanda": "kin_Latn",
    "Kyrgyz": "kir_Cyrl",
    "Kimbundu": "kmb_Latn",
    "Northern Kurdish": "kmr_Latn",
    "Kikongo": "kon_Latn",
    "Korean": "kor_Hang",
    "Lao": "lao_Laoo",
    "Ligurian": "lij_Latn",
    "Limburgish": "lim_Latn",
    "Lingala": "lin_Latn",
    "Lithuanian": "lit_Latn",
    "Lombard": "lmo_Latn",
    "Latgalian": "ltg_Latn",
    "Luxembourgish": "ltz_Latn",
    "Luba-Kasai": "lua_Latn",
    "Ganda": "lug_Latn",
    "Luo": "luo_Latn",
    "Mizo": "lus_Latn",
    "Standard Latvian": "lvs_Latn",
    "Magahi": "mag_Deva",
    "Maithili": "mai_Deva",
    "Malayalam": "mal_Mlym",
    "Marathi": "mar_Deva",
    "Minangkabau Arabic ": "min_Arab",
    "Minangkabau Latin": "min_Latn",
    "Macedonian": "mkd_Cyrl",
    "Plateau Malagasy": "plt_Latn",
    "Maltese": "mlt_Latn",
    "Meitei Bengali": "mni_Beng",
    "Halh Mongolian": "khk_Cyrl",
    "Mossi": "mos_Latn",
    "Maori": "mri_Latn",
    "Burmese": "mya_Mymr",
    "Dutch": "nld_Latn",
    "Norwegian Nynorsk": "nno_Latn",
    "Norwegian Bokmål": "nob_Latn",
    "Nepali": "npi_Deva",
    "Northern Sotho": "nso_Latn",
    "Nuer": "nus_Latn",
    "Nyanja": "nya_Latn",
    "Occitan": "oci_Latn",
    "West Central Oromo": "gaz_Latn",
    "Odia": "ory_Orya",
    "Pangasinan": "pag_Latn",
    "Eastern Panjabi": "pan_Guru",
    "Papiamento": "pap_Latn",
    "Western Persian": "pes_Arab",
    "Polish": "pol_Latn",
    "Portuguese": "por_Latn",
    "Dari": "prs_Arab",
    "Southern Pashto": "pbt_Arab",
    "Ayacucho Quechua": "quy_Latn",
    "Romanian": "ron_Latn",
    "Rundi": "run_Latn",
    "Russian": "rus_Cyrl",
    "Sango": "sag_Latn",
    "Sanskrit": "san_Deva",
    "Santali": "sat_Olck",
    "Sicilian": "scn_Latn",
    "Shan": "shn_Mymr",
    "Sinhala": "sin_Sinh",
    "Slovak": "slk_Latn",
    "Slovenian": "slv_Latn",
    "Samoan": "smo_Latn",
    "Shona": "sna_Latn",
    "Sindhi": "snd_Arab",
    "Somali": "som_Latn",
    "Southern Sotho": "sot_Latn",
    "Spanish": "spa_Latn",
    "Tosk Albanian": "als_Latn",
    "Sardinian": "srd_Latn",
    "Serbian": "srp_Cyrl",
    "Swati": "ssw_Latn",
    "Sundanese": "sun_Latn",
    "Swedish": "swe_Latn",
    "Swahili": "swh_Latn",
    "Silesian": "szl_Latn",
    "Tamil": "tam_Taml",
    "Tatar": "tat_Cyrl",
    "Telugu": "tel_Telu",
    "Tajik": "tgk_Cyrl",
    "Tagalog": "tgl_Latn",
    "Thai": "tha_Thai",
    "Tigrinya": "tir_Ethi",
    "Tamasheq Latin": "taq_Latn",
    "Tamasheq Tifinagh": "taq_Tfng",
    "Tok Pisin": "tpi_Latn",
    "Tswana": "tsn_Latn",
    "Tsonga": "tso_Latn",
    "Turkmen": "tuk_Latn",
    "Tumbuka": "tum_Latn",
    "Turkish": "tur_Latn",
    "Twi": "twi_Latn",
    "Central Atlas Tamazight": "tzm_Tfng",
    "Uyghur": "uig_Arab",
    "Ukrainian": "ukr_Cyrl",
    "Umbundu": "umb_Latn",
    "Urdu": "urd_Arab",
    "Northern Uzbek": "uzn_Latn",
    "Venetian": "vec_Latn",
    "Vietnamese": "vie_Latn",
    "Waray": "war_Latn",
    "Wolof": "wol_Latn",
    "Xhosa": "xho_Latn",
    "Eastern Yiddish": "ydd_Hebr",
    "Yoruba": "yor_Latn",
    "Yue Chinese": "yue_Hant",
    "Chinese Simplified": "zho_Hans",
    "Chinese Traditional": "zho_Hant",
    "Standard Malay": "zsm_Latn",
    "Zulu": "zul_Latn",
}
# Fixes: the tool class below reads `LANGUAGE_CODES`, which was never defined
# after the table's name was clobbered. Alias it here; the clobbered name is
# kept for backward compatibility.
LANGUAGE_CODES = SCREAMING_SNAKE_CASE__
class a_ ( lowercase_ ):
    """Agent tool that translates text between two languages with NLLB-200.

    Fixes: the eight class attributes were all clobbered to the single name
    ``A`` (so only the last survived and ``self.lang_to_code`` was unbound),
    the three pipeline hooks all shared the name ``A_`` (shadowing each
    other), and the encode hook declared three parameters with the same name
    — a SyntaxError. The standard PipelineTool attribute/hook names are
    restored; ``A``/``A_`` aliases preserve what the obfuscated class
    exposed.
    """

    default_checkpoint = '''facebook/nllb-200-distilled-600M'''
    description = (
        '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
        '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
        '''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '''
        '''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
    )
    name = '''translator'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = SCREAMING_SNAKE_CASE__  # module-level language table above
    inputs = ['''text''', '''text''', '''text''']
    outputs = ['''text''']

    def encode(self, text, src_lang, tgt_lang):
        """Map plain-English language names to NLLB codes and tokenize."""
        if src_lang not in self.lang_to_code:
            raise ValueError(f'{src_lang} is not a supported language.')
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f'{tgt_lang} is not a supported language.')
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors='pt', src_lang=src_lang, tgt_lang=tgt_lang)

    def forward(self, inputs):
        """Generate the translation from the tokenized inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode generated token ids back to text."""
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)

    # Backward-compat aliases: in the obfuscated original only the last of the
    # duplicated `A`/`A_` bindings survived (the outputs list / decode hook).
    A = outputs
    A_ = decode
| 205 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( lowercase_ ):
    """Output container for the prior transformer's prediction."""

    # predicted embedding tensor; presumably (batch_size, embedding_dim) — TODO confirm
    _lowercase : torch.FloatTensor
class _lowerCAmelCase ( lowercase_ , lowercase_ ):
"""simple docstring"""
    @register_to_config
    def __init__( self : Tuple , UpperCamelCase__ : int = 3_2 , UpperCamelCase__ : int = 6_4 , UpperCamelCase__ : int = 2_0 , UpperCamelCase__ : int = 7_6_8 , UpperCamelCase__ : Optional[Any]=7_7 , UpperCamelCase__ : str=4 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : str = "silu" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = "linear" , UpperCamelCase__ : Optional[str] = "prd" , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , ):
        '''Build the prior-transformer submodules: time embedding, projections,
        transformer blocks, norms, and the causal attention mask buffer.

        NOTE(review): every parameter was renamed to the same placeholder
        ``UpperCamelCase__`` (so only the last binding is visible) and every
        ``self.*`` / local assignment target was clobbered to ``snake_case__``
        — names such as ``num_attention_heads``, ``additional_embeddings`` and
        ``causal_attention_mask`` are read below but never bound. The wiring
        matches diffusers' PriorTransformer.__init__; confirm upstream before
        restoring names.
        '''
        super().__init__()
        snake_case__ = num_attention_heads
        snake_case__ = attention_head_dim
        snake_case__ = num_attention_heads * attention_head_dim
        snake_case__ = additional_embeddings
        snake_case__ = time_embed_dim or inner_dim
        snake_case__ = embedding_proj_dim or embedding_dim
        snake_case__ = clip_embed_dim or embedding_dim
        # timestep embedding + input projections
        snake_case__ = Timesteps(UpperCamelCase__ , UpperCamelCase__ , 0)
        snake_case__ = TimestepEmbedding(UpperCamelCase__ , UpperCamelCase__ , out_dim=UpperCamelCase__ , act_fn=UpperCamelCase__)
        snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        if embedding_proj_norm_type is None:
            snake_case__ = None
        elif embedding_proj_norm_type == "layer":
            snake_case__ = nn.LayerNorm(UpperCamelCase__)
        else:
            raise ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''')
        snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        if encoder_hid_proj_type is None:
            snake_case__ = None
        elif encoder_hid_proj_type == "linear":
            snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        else:
            raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''')
        # learned positional embeddings (+ optional "prd" token)
        snake_case__ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase__))
        if added_emb_type == "prd":
            snake_case__ = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase__))
        elif added_emb_type is None:
            snake_case__ = None
        else:
            raise ValueError(
                F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''')
        # the transformer trunk
        snake_case__ = nn.ModuleList(
            [
                BasicTransformerBlock(
                    UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn="""gelu""" , attention_bias=UpperCamelCase__ , )
                for d in range(UpperCamelCase__)
            ])
        if norm_in_type == "layer":
            snake_case__ = nn.LayerNorm(UpperCamelCase__)
        elif norm_in_type is None:
            snake_case__ = None
        else:
            raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''')
        snake_case__ = nn.LayerNorm(UpperCamelCase__)
        snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
        # strictly-upper-triangular -10000 mask enforces causal attention
        snake_case__ = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0)
        causal_attention_mask.triu_(1)
        snake_case__ = causal_attention_mask[None, ...]
        self.register_buffer("""causal_attention_mask""" , UpperCamelCase__ , persistent=UpperCamelCase__)
        snake_case__ = nn.Parameter(torch.zeros(1 , UpperCamelCase__))
        snake_case__ = nn.Parameter(torch.zeros(1 , UpperCamelCase__))
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def attn_processors(self):
    """
    Return a dict of all attention processors used in the model, indexed by their
    weight name (e.g. ``"transformer_blocks.0.attn1.processor"``).
    """
    # NOTE(review): reconstructed from mangled placeholders (`snake_case__`,
    # `UpperCamelCase__`, `__magic_name__`): the original assigned every value to the
    # same throwaway local and recursed with undefined arguments. Renamed to
    # `attn_processors` to match the read `self.attn_processors` in
    # `set_attn_processor` below.
    processors = {}

    def fn_recursive_add_processors(name, module, processors):
        # Any submodule exposing `set_processor` owns an attention processor.
        if hasattr(module, "set_processor"):
            processors[f"{name}.processor"] = module.processor

        for sub_name, child in module.named_children():
            fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

        return processors

    for name, module in self.named_children():
        fn_recursive_add_processors(name, module, processors)

    return processors
def set_attn_processor(self, processor):
    """
    Set the attention processor used by every attention layer.

    Args:
        processor: either a single processor instance applied to every layer, or a
            dict mapping each ``"<weight name>.processor"`` key to its own processor.

    Raises:
        ValueError: if a dict is passed whose size differs from the number of
            attention layers.
    """
    # NOTE(review): reconstructed from mangled placeholders; renamed to
    # `set_attn_processor` to match the existing call site
    # `self.set_attn_processor(AttnProcessor())` below.
    count = len(self.attn_processors.keys())

    if isinstance(processor, dict) and len(processor) != count:
        raise ValueError(
            f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
            f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
        )

    def fn_recursive_attn_processor(name, module, processor):
        if hasattr(module, "set_processor"):
            if not isinstance(processor, dict):
                # Single shared processor instance.
                module.set_processor(processor)
            else:
                # Per-layer dict: pop this layer's entry by its weight name.
                module.set_processor(processor.pop(f"{name}.processor"))

        for sub_name, child in module.named_children():
            fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

    for name, module in self.named_children():
        fn_recursive_attn_processor(name, module, processor)
def set_default_attn_processor(self):
    """
    Disable custom attention processors and restore the default attention
    implementation for every attention layer.
    """
    # NOTE(review): renamed from the mangled `__magic_name__` so it no longer
    # collides with the other methods of this class.
    self.set_attn_processor(AttnProcessor())
def forward(self, hidden_states, timestep, proj_embedding, encoder_hidden_states=None, attention_mask=None, return_dict=True):
    """
    Denoising forward pass of the prior transformer.

    Args:
        hidden_states: noisy image embeddings, shape ``(batch, embedding_dim)``
            or ``(batch, num_embeddings, embedding_dim)``.
        timestep: current denoising step — a tensor, float or int.
        proj_embedding: projected conditioning embedding.
        encoder_hidden_states: optional text encoder hidden states; required when
            ``self.encoder_hidden_states_proj`` is configured.
        attention_mask: optional boolean mask, 1 = attend.
        return_dict: when False, return a plain ``(predicted_image_embedding,)`` tuple.

    NOTE(review): the original signature declared every parameter as
    ``UpperCamelCase__`` (duplicate names — a SyntaxError) and collapsed all locals
    into ``snake_case__``; names restored from the surviving right-hand sides.
    """
    batch_size = hidden_states.shape[0]

    timesteps = timestep
    if not torch.is_tensor(timesteps):
        timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
    elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
        timesteps = timesteps[None].to(hidden_states.device)

    # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
    timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

    timesteps_projected = self.time_proj(timesteps)
    # timesteps does not contain any weights and will always return f32 tensors
    # but time_embedding might be fp16, so we need to cast here.
    timesteps_projected = timesteps_projected.to(dtype=self.dtype)
    time_embeddings = self.time_embedding(timesteps_projected)

    if self.embedding_proj_norm is not None:
        proj_embedding = self.embedding_proj_norm(proj_embedding)
    proj_embeddings = self.embedding_proj(proj_embedding)

    if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
        encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
    elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
        raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

    hidden_states = self.proj_in(hidden_states)
    positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

    # Assemble the token sequence: [encoder states?] + proj + time + hidden (+ prd).
    additional_embeds = []
    additional_embeddings_len = 0
    if encoder_hidden_states is not None:
        additional_embeds.append(encoder_hidden_states)
        additional_embeddings_len += encoder_hidden_states.shape[1]

    if len(proj_embeddings.shape) == 2:
        proj_embeddings = proj_embeddings[:, None, :]
    if len(hidden_states.shape) == 2:
        hidden_states = hidden_states[:, None, :]

    additional_embeds = additional_embeds + [
        proj_embeddings,
        time_embeddings[:, None, :],
        hidden_states,
    ]
    if self.prd_embedding is not None:
        prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
        additional_embeds.append(prd_embedding)

    hidden_states = torch.cat(additional_embeds, dim=1)

    # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
    additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
    if positional_embeddings.shape[1] < hidden_states.shape[1]:
        positional_embeddings = F.pad(
            positional_embeddings,
            (
                0,
                0,
                additional_embeddings_len,
                self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
            ),
            value=0.0,
        )

    hidden_states = hidden_states + positional_embeddings

    if attention_mask is not None:
        # Convert the boolean mask to additive bias, pad for the extra tokens and
        # fold in the causal mask, then tile per attention head.
        attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
        attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
        attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
        attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

    if self.norm_in is not None:
        hidden_states = self.norm_in(hidden_states)

    for block in self.transformer_blocks:
        hidden_states = block(hidden_states, attention_mask=attention_mask)

    hidden_states = self.norm_out(hidden_states)

    if self.prd_embedding is not None:
        # The prd token (last position) carries the prediction.
        hidden_states = hidden_states[:, -1]
    else:
        hidden_states = hidden_states[:, additional_embeddings_len:]

    predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

    if not return_dict:
        return (predicted_image_embedding,)

    return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
def post_process_latents(self, prior_latents):
    """
    Undo CLIP latent normalization: scale by ``clip_std`` and shift by ``clip_mean``.

    NOTE(review): the original's parameter was mangled to ``UpperCamelCase__`` while
    the body still read ``prior_latents`` (NameError); parameter name restored.
    """
    prior_latents = (prior_latents * self.clip_std) + self.clip_mean
    return prior_latents
| 654 | 0 |
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    """
    Histogram equalization ("constant stretch") of a grayscale image.

    NOTE(review): reconstructed from mangled code in which every ``self.attr = ...``
    assignment had been collapsed into the same local (``__lowercase``) and all three
    methods shared one name. The class is renamed from ``lowercase_`` to
    ``ConstantStretch`` and the methods to ``stretch`` / ``plot_histogram`` /
    ``show_image`` to match the calls in the ``__main__`` guard of this file.
    """

    def __init__(self):
        self.img = ""             # grayscale image currently being processed
        self.original_image = ""  # untouched copy kept for side-by-side display
        self.last_list = []       # per-intensity remapping table built by stretch()
        self.rem = 0              # rounding remainder of the running mapping
        self.L = 2_5_6            # number of gray levels
        self.sk = 0               # running cumulative distribution value
        self.k = 0                # total pixel count taken from the histogram
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        """Equalize the histogram of *input_image* and write the result to output_data/output.jpg."""
        self.img = cva.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 2_5_6, [0, 2_5_6], label='x')
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
        # assumes a 2-D image: rows = total pixels / row width — TODO confirm against cv2.imread output
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite('output_data/output.jpg', self.img)

    def plot_histogram(self):
        """Plot the histogram of the (possibly stretched) image."""
        plt.hist(self.img.ravel(), 2_5_6, [0, 2_5_6])

    def show_image(self):
        """Display the stretched image next to the original for five seconds."""
        cva.imshow('Output-Image', self.img)
        cva.imshow('Input-Image', self.original_image)
        cva.waitKey(5_0_0_0)
        cva.destroyAllWindows()
if __name__ == "__main__":
    # BUG FIX: os.path.basename(__file__) yields the script's *file name*; the
    # image_data directory lives next to the script, so we need its directory.
    file_path = os.path.join(os.path.dirname(__file__), '''image_data/input.jpg''')
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 41 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
# Checkpoints used by the tokenizer tests below. BUG FIX: both constants were
# mangled to the same name `a__` (the second clobbered the first), while the
# tests read `TOKENIZER_CHECKPOINTS` / `TINY_MODEL_CHECKPOINT`.
TOKENIZER_CHECKPOINTS = ["""gpt2"""]
TINY_MODEL_CHECKPOINT = """gpt2"""
if is_tf_available():

    class ModelToSave(tf.Module):
        """
        Wraps an in-graph tokenizer plus a GPT-2 LM head so both can be exported
        together as a single SavedModel.

        NOTE(review): reconstructed — the constructor originally assigned to mangled
        locals instead of ``self`` attributes (so ``serving`` crashed on
        ``self.tokenizer`` / ``self.model``), and the class name is restored to
        ``ModelToSave`` to match its instantiation in the test class below.
        """

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPTaLMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="""text"""),))
        def serving(self, text):
            """Tokenize a batch of strings and return the LM logits."""
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["""input_ids"""].to_tensor()
            # BUG FIX: `tf.intaa` is not a TensorFlow dtype; int32 assumed for the
            # attention mask — TODO confirm (could be int64).
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["""logits"""]
            return outputs
@require_tf
@require_keras_nlp
class TFGPTaTokenizerTest(unittest.TestCase):
    """
    Checks that the in-graph TFGPTaTokenizer matches the reference Python tokenizer
    and survives graph compilation, SavedModel round-trips, config round-trips and
    padding to a fixed length.

    NOTE(review): reconstructed — setUp originally assigned to mangled locals
    instead of the ``self.*`` attributes the tests read, and every method shared the
    name ``__magic_name__`` (later defs clobbered earlier ones). Attribute names are
    taken from the reads in the method bodies; method names restored to distinct
    ``test_*`` names so unittest discovers them all.
    """

    def setUp(self):
        super().setUp()

        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            """This is a straightforward English test sentence.""",
            """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
            """Now we're going to add some Chinese: 一 二 三 一二三""",
            """And some much more rare Chinese: 齉 堃 齉堃""",
            """Je vais aussi écrire en français pour tester les accents""",
            """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_tokenizer(self):
        """The in-graph tokenizer must produce the same ids as the Python tokenizer."""
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="""tf""")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    # BUG FIX: `tf.intaa` is not a TensorFlow dtype; int64 assumed
                    # to match the in-graph tokenizer's output — TODO confirm.
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        """The tokenizer must produce identical output after tf.function compilation."""
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        """The tokenizer+model pair must round-trip through SavedModel export."""
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / """saved.model"""
                tf.saved_model.save(model, save_path, signatures={"""serving_default""": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["""serving_default"""](test_inputs)["""output_0"""]
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        """A tokenizer rebuilt from its config must behave identically."""
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        """Tokenizing with max_length must pad/truncate to exactly that length."""
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            # presumably this sets the pad token id — TODO confirm attribute name
            tf_tokenizer.pad_token_id = 1_2_3_1_2_3

            for max_length in [3, 5, 1_0_2_4]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out["""input_ids"""].numpy().shape[1]
                assert out_length == max_length
| 654 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    """
    Configuration for BertGeneration models (encoder/decoder BERT variants).

    NOTE(review): reconstructed — the original subclassed an undefined name
    (``lowercase_``; the file imports ``PretrainedConfig``), declared every
    ``__init__`` parameter as ``_a`` (duplicate names — a SyntaxError), and mangled
    the ``model_type`` class attribute. Parameter names are restored from the
    attribute assignments in the body; defaults are unchanged.
    """

    model_type = '''bert-generation'''

    def __init__(
        self,
        vocab_size=50_358,          # size of the token vocabulary
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.0_2,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 361 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    """
    Scheduler tests for IPNDMScheduler.

    NOTE(review): reconstructed — the original subclassed an undefined name
    (``lowercase_``; the file imports ``SchedulerCommonTest``), declared both class
    attributes as ``_lowercase`` (the second clobbered the first, while the bodies
    read ``self.scheduler_classes`` and ``self.forward_default_kwargs``), named
    every method ``__magic_name__`` and collapsed all locals into ``snake_case__``.
    Names are restored from the surviving reads in the method bodies.
    """

    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("""num_inference_steps""", 50),)

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config, overridable via keyword arguments."""
        config = {"""num_train_timesteps""": 1_0_0_0}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Save/reload the scheduler config and check step outputs are identical."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("""num_inference_steps""", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            # presumably `ets` is IPNDMScheduler's stored-residuals attribute — TODO confirm
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # Covered by check_over_configs above for this scheduler.
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Save/reload the scheduler and check step outputs for given forward kwargs."""
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("""num_inference_steps""", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        """Run two full denoising passes and return the final sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1_0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        """Stepping at two timesteps must preserve the sample shape."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("""num_inference_steps""", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, """set_timesteps"""):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, """set_timesteps"""):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_a = scheduler.timesteps[5]
            time_step_b = scheduler.timesteps[6]

            output_a = scheduler.step(residual, time_step_a, sample, **kwargs).prev_sample
            output_b = scheduler.step(residual, time_step_b, sample, **kwargs).prev_sample
            self.assertEqual(output_a.shape, sample.shape)
            self.assertEqual(output_a.shape, output_b.shape)

            output_a = scheduler.step(residual, time_step_a, sample, **kwargs).prev_sample
            output_b = scheduler.step(residual, time_step_b, sample, **kwargs).prev_sample
            self.assertEqual(output_a.shape, sample.shape)
            self.assertEqual(output_a.shape, output_b.shape)

    def test_timesteps(self):
        for timesteps in [1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 1_0], [1_0, 5_0, 1_0_0]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2_5_4_0_5_2_9) < 1_0
| 654 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.